perf_counter.c

/*
 * Performance counter support - powerpc architecture code
 *
 * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_counter.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/reg.h>
#include <asm/pmc.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>

struct cpu_hw_counters {
	int n_counters;
	int n_percpu;
	int disabled;
	int n_added;
	int n_limited;
	u8  pmcs_enabled;
	struct perf_counter *counter[MAX_HWCOUNTERS];
	u64 events[MAX_HWCOUNTERS];
	unsigned int flags[MAX_HWCOUNTERS];
	u64 mmcr[3];
	struct perf_counter *limited_counter[MAX_LIMITED_HWCOUNTERS];
	u8  limited_hwidx[MAX_LIMITED_HWCOUNTERS];
};
DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters);

struct power_pmu *ppmu;

/*
 * Normally, to ignore kernel events we set the FCS (freeze counters
 * in supervisor mode) bit in MMCR0, but if the kernel runs with the
 * hypervisor bit set in the MSR, or if we are running on a processor
 * where the hypervisor bit is forced to 1 (as on Apple G5 processors),
 * then we need to use the FCHV bit to ignore kernel events.
 */
static unsigned int freeze_counters_kernel = MMCR0_FCS;

static void perf_counter_interrupt(struct pt_regs *regs);

void perf_counter_print_debug(void)
{
}

/*
 * Read one performance monitor counter (PMC).
 */
static unsigned long read_pmc(int idx)
{
	unsigned long val;

	switch (idx) {
	case 1:
		val = mfspr(SPRN_PMC1);
		break;
	case 2:
		val = mfspr(SPRN_PMC2);
		break;
	case 3:
		val = mfspr(SPRN_PMC3);
		break;
	case 4:
		val = mfspr(SPRN_PMC4);
		break;
	case 5:
		val = mfspr(SPRN_PMC5);
		break;
	case 6:
		val = mfspr(SPRN_PMC6);
		break;
	case 7:
		val = mfspr(SPRN_PMC7);
		break;
	case 8:
		val = mfspr(SPRN_PMC8);
		break;
	default:
		printk(KERN_ERR "oops trying to read PMC%d\n", idx);
		val = 0;
	}
	return val;
}

/*
 * Write one PMC.
 */
static void write_pmc(int idx, unsigned long val)
{
	switch (idx) {
	case 1:
		mtspr(SPRN_PMC1, val);
		break;
	case 2:
		mtspr(SPRN_PMC2, val);
		break;
	case 3:
		mtspr(SPRN_PMC3, val);
		break;
	case 4:
		mtspr(SPRN_PMC4, val);
		break;
	case 5:
		mtspr(SPRN_PMC5, val);
		break;
	case 6:
		mtspr(SPRN_PMC6, val);
		break;
	case 7:
		mtspr(SPRN_PMC7, val);
		break;
	case 8:
		mtspr(SPRN_PMC8, val);
		break;
	default:
		printk(KERN_ERR "oops trying to write PMC%d\n", idx);
	}
}

/*
 * Check if a set of events can all go on the PMU at once.
 * If they can't, this will look at alternative codes for the events
 * and see if any combination of alternative codes is feasible.
 * The feasible set is returned in event[].
 */
static int power_check_constraints(u64 event[], unsigned int cflags[],
				   int n_ev)
{
	u64 mask, value, nv;
	u64 alternatives[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
	u64 amasks[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
	u64 avalues[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
	u64 smasks[MAX_HWCOUNTERS], svalues[MAX_HWCOUNTERS];
	int n_alt[MAX_HWCOUNTERS], choice[MAX_HWCOUNTERS];
	int i, j;
	u64 addf = ppmu->add_fields;
	u64 tadd = ppmu->test_adder;

	if (n_ev > ppmu->n_counter)
		return -1;

	/* First see if the events will go on as-is */
	for (i = 0; i < n_ev; ++i) {
		if ((cflags[i] & PPMU_LIMITED_PMC_REQD)
		    && !ppmu->limited_pmc_event(event[i])) {
			ppmu->get_alternatives(event[i], cflags[i],
					       alternatives[i]);
			event[i] = alternatives[i][0];
		}
		if (ppmu->get_constraint(event[i], &amasks[i][0],
					 &avalues[i][0]))
			return -1;
	}
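	/*
	 * Each event's constraint is a (mask, value) pair from
	 * ppmu->get_constraint(): value encodes which PMU resources the
	 * event uses, mask marks the bits that must not conflict between
	 * events.  The add_fields/test_adder constants let the small
	 * counting fields in value be summed and checked for overflow,
	 * i.e. for too many events competing for the same resource.
	 */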
	value = mask = 0;
	for (i = 0; i < n_ev; ++i) {
		nv = (value | avalues[i][0]) + (value & avalues[i][0] & addf);
		if ((((nv + tadd) ^ value) & mask) != 0 ||
		    (((nv + tadd) ^ avalues[i][0]) & amasks[i][0]) != 0)
			break;
		value = nv;
		mask |= amasks[i][0];
	}
	if (i == n_ev)
		return 0;	/* all OK */

	/* doesn't work, gather alternatives... */
	if (!ppmu->get_alternatives)
		return -1;
	for (i = 0; i < n_ev; ++i) {
		choice[i] = 0;
		n_alt[i] = ppmu->get_alternatives(event[i], cflags[i],
						  alternatives[i]);
		for (j = 1; j < n_alt[i]; ++j)
			ppmu->get_constraint(alternatives[i][j],
					     &amasks[i][j], &avalues[i][j]);
	}

	/* enumerate all possibilities and see if any will work */
	i = 0;
	j = -1;
	value = mask = nv = 0;
	while (i < n_ev) {
		if (j >= 0) {
			/* we're backtracking, restore context */
			value = svalues[i];
			mask = smasks[i];
			j = choice[i];
		}
		/*
		 * See if any alternative k for event i,
		 * where k > j, will satisfy the constraints.
		 */
		while (++j < n_alt[i]) {
			nv = (value | avalues[i][j]) +
				(value & avalues[i][j] & addf);
			if ((((nv + tadd) ^ value) & mask) == 0 &&
			    (((nv + tadd) ^ avalues[i][j])
			     & amasks[i][j]) == 0)
				break;
		}
		if (j >= n_alt[i]) {
			/*
			 * No feasible alternative, backtrack
			 * to event i-1 and continue enumerating its
			 * alternatives from where we got up to.
			 */
			if (--i < 0)
				return -1;
		} else {
			/*
			 * Found a feasible alternative for event i,
			 * remember where we got up to with this event,
			 * go on to the next event, and start with
			 * the first alternative for it.
			 */
			choice[i] = j;
			svalues[i] = value;
			smasks[i] = mask;
			value = nv;
			mask |= amasks[i][j];
			++i;
			j = -1;
		}
	}

	/* OK, we have a feasible combination, tell the caller the solution */
	for (i = 0; i < n_ev; ++i)
		event[i] = alternatives[i][choice[i]];
	return 0;
}

/*
 * Check if newly-added counters have consistent settings for
 * exclude_{user,kernel,hv} with each other and any previously
 * added counters.
 */
static int check_excludes(struct perf_counter **ctrs, unsigned int cflags[],
			  int n_prev, int n_new)
{
	int eu = 0, ek = 0, eh = 0;
	int i, n, first;
	struct perf_counter *counter;

	n = n_prev + n_new;
	if (n <= 1)
		return 0;

	first = 1;
	for (i = 0; i < n; ++i) {
		if (cflags[i] & PPMU_LIMITED_PMC_OK) {
			cflags[i] &= ~PPMU_LIMITED_PMC_REQD;
			continue;
		}
		counter = ctrs[i];
		if (first) {
			eu = counter->hw_event.exclude_user;
			ek = counter->hw_event.exclude_kernel;
			eh = counter->hw_event.exclude_hv;
			first = 0;
		} else if (counter->hw_event.exclude_user != eu ||
			   counter->hw_event.exclude_kernel != ek ||
			   counter->hw_event.exclude_hv != eh) {
			return -EAGAIN;
		}
	}

	if (eu || ek || eh)
		for (i = 0; i < n; ++i)
			if (cflags[i] & PPMU_LIMITED_PMC_OK)
				cflags[i] |= PPMU_LIMITED_PMC_REQD;

	return 0;
}

static void power_pmu_read(struct perf_counter *counter)
{
	long val, delta, prev;

	if (!counter->hw.idx)
		return;
	/*
	 * Performance monitor interrupts come even when interrupts
	 * are soft-disabled, as long as interrupts are hard-enabled.
	 * Therefore we treat them like NMIs.
	 */
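	/*
	 * Read the PMC and publish it as the new prev_count atomically:
	 * if a PMU interrupt updates prev_count between the read and the
	 * cmpxchg, the cmpxchg fails and we retry, so no increment is
	 * counted twice or lost.
	 */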
	do {
		prev = atomic64_read(&counter->hw.prev_count);
		barrier();
		val = read_pmc(counter->hw.idx);
	} while (atomic64_cmpxchg(&counter->hw.prev_count, prev, val) != prev);

	/* The counters are only 32 bits wide */
	delta = (val - prev) & 0xfffffffful;
	atomic64_add(delta, &counter->count);
	atomic64_sub(delta, &counter->hw.period_left);
}

/*
 * On some machines, PMC5 and PMC6 can't be written, don't respect
 * the freeze conditions, and don't generate interrupts.  This tells
 * us if `counter' is using such a PMC.
 */
static int is_limited_pmc(int pmcnum)
{
	return (ppmu->flags & PPMU_LIMITED_PMC5_6)
		&& (pmcnum == 5 || pmcnum == 6);
}

static void freeze_limited_counters(struct cpu_hw_counters *cpuhw,
				    unsigned long pmc5, unsigned long pmc6)
{
	struct perf_counter *counter;
	u64 val, prev, delta;
	int i;

	for (i = 0; i < cpuhw->n_limited; ++i) {
		counter = cpuhw->limited_counter[i];
		if (!counter->hw.idx)
			continue;
		val = (counter->hw.idx == 5) ? pmc5 : pmc6;
		prev = atomic64_read(&counter->hw.prev_count);
		counter->hw.idx = 0;
		delta = (val - prev) & 0xfffffffful;
		atomic64_add(delta, &counter->count);
	}
}

static void thaw_limited_counters(struct cpu_hw_counters *cpuhw,
				  unsigned long pmc5, unsigned long pmc6)
{
	struct perf_counter *counter;
	u64 val;
	int i;

	for (i = 0; i < cpuhw->n_limited; ++i) {
		counter = cpuhw->limited_counter[i];
		counter->hw.idx = cpuhw->limited_hwidx[i];
		val = (counter->hw.idx == 5) ? pmc5 : pmc6;
		atomic64_set(&counter->hw.prev_count, val);
		perf_counter_update_userpage(counter);
	}
}

/*
 * Since limited counters don't respect the freeze conditions, we
 * have to read them immediately after freezing or unfreezing the
 * other counters.  We try to keep the values from the limited
 * counters as consistent as possible by keeping the delay (in
 * cycles and instructions) between freezing/unfreezing and reading
 * the limited counters as small and consistent as possible.
 * Therefore, if any limited counters are in use, we read them
 * both, and always in the same order, to minimize variability,
 * and do it inside the same asm that writes MMCR0.
 */
static void write_mmcr0(struct cpu_hw_counters *cpuhw, unsigned long mmcr0)
{
	unsigned long pmc5, pmc6;

	if (!cpuhw->n_limited) {
		mtspr(SPRN_MMCR0, mmcr0);
		return;
	}

	/*
	 * Write MMCR0, then read PMC5 and PMC6 immediately.
	 */
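	/*
	 * The SPR numbers are "i" (immediate) operands because mtspr/mfspr
	 * encode the SPR in the instruction; the "=&r" early-clobber
	 * outputs keep pmc5/pmc6 from sharing a register with mmcr0, so
	 * the three instructions can run back to back.
	 */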
	asm volatile("mtspr %3,%2; mfspr %0,%4; mfspr %1,%5"
		     : "=&r" (pmc5), "=&r" (pmc6)
		     : "r" (mmcr0), "i" (SPRN_MMCR0),
		       "i" (SPRN_PMC5), "i" (SPRN_PMC6));

	if (mmcr0 & MMCR0_FC)
		freeze_limited_counters(cpuhw, pmc5, pmc6);
	else
		thaw_limited_counters(cpuhw, pmc5, pmc6);
}

/*
 * Disable all counters to prevent PMU interrupts and to allow
 * counters to be added or removed.
 */
void hw_perf_disable(void)
{
	struct cpu_hw_counters *cpuhw;
	unsigned long ret;
	unsigned long flags;

	local_irq_save(flags);
	cpuhw = &__get_cpu_var(cpu_hw_counters);

	ret = cpuhw->disabled;
	if (!ret) {
		cpuhw->disabled = 1;
		cpuhw->n_added = 0;

		/*
		 * Check if we ever enabled the PMU on this cpu.
		 */
		if (!cpuhw->pmcs_enabled) {
			if (ppc_md.enable_pmcs)
				ppc_md.enable_pmcs();
			cpuhw->pmcs_enabled = 1;
		}

		/*
		 * Disable instruction sampling if it was enabled
		 */
		if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
			mtspr(SPRN_MMCRA,
			      cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
			mb();
		}

		/*
		 * Set the 'freeze counters' bit.
		 * The barrier is to make sure the mtspr has been
		 * executed and the PMU has frozen the counters
		 * before we return.
		 */
		write_mmcr0(cpuhw, mfspr(SPRN_MMCR0) | MMCR0_FC);
		mb();
	}
	local_irq_restore(flags);
}

/*
 * Re-enable all counters if disable == 0.
 * If we were previously disabled and counters were added, then
 * put the new config on the PMU.
 */
void hw_perf_enable(void)
{
	struct perf_counter *counter;
	struct cpu_hw_counters *cpuhw;
	unsigned long flags;
	long i;
	unsigned long val;
	s64 left;
	unsigned int hwc_index[MAX_HWCOUNTERS];
	int n_lim;
	int idx;

	local_irq_save(flags);
	cpuhw = &__get_cpu_var(cpu_hw_counters);
	if (!cpuhw->disabled) {
		local_irq_restore(flags);
		return;
	}
	cpuhw->disabled = 0;

	/*
	 * If we didn't change anything, or only removed counters,
	 * no need to recalculate MMCR* settings and reset the PMCs.
	 * Just reenable the PMU with the current MMCR* settings
	 * (possibly updated for removal of counters).
	 */
	if (!cpuhw->n_added) {
		mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
		mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
		if (cpuhw->n_counters == 0)
			get_lppaca()->pmcregs_in_use = 0;
		goto out_enable;
	}

	/*
	 * Compute MMCR* values for the new set of counters
	 */
	if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_counters, hwc_index,
			       cpuhw->mmcr)) {
		/* shouldn't ever get here */
		printk(KERN_ERR "oops compute_mmcr failed\n");
		goto out;
	}

	/*
	 * Add in MMCR0 freeze bits corresponding to the
	 * hw_event.exclude_* bits for the first counter.
	 * We have already checked that all counters have the
	 * same values for these bits as the first counter.
	 */
	counter = cpuhw->counter[0];
	if (counter->hw_event.exclude_user)
		cpuhw->mmcr[0] |= MMCR0_FCP;
	if (counter->hw_event.exclude_kernel)
		cpuhw->mmcr[0] |= freeze_counters_kernel;
	if (counter->hw_event.exclude_hv)
		cpuhw->mmcr[0] |= MMCR0_FCHV;

	/*
	 * Write the new configuration to MMCR* with the freeze
	 * bit set and set the hardware counters to their initial values.
	 * Then unfreeze the counters.
	 */
	get_lppaca()->pmcregs_in_use = 1;
	mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
	mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
	mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
	      | MMCR0_FC);

	/*
	 * Read off any pre-existing counters that need to move
	 * to another PMC.
	 */
	for (i = 0; i < cpuhw->n_counters; ++i) {
		counter = cpuhw->counter[i];
		if (counter->hw.idx && counter->hw.idx != hwc_index[i] + 1) {
			power_pmu_read(counter);
			write_pmc(counter->hw.idx, 0);
			counter->hw.idx = 0;
		}
	}

	/*
	 * Initialize the PMCs for all the new and moved counters.
	 */
	cpuhw->n_limited = n_lim = 0;
	for (i = 0; i < cpuhw->n_counters; ++i) {
		counter = cpuhw->counter[i];
		if (counter->hw.idx)
			continue;
		idx = hwc_index[i] + 1;
		if (is_limited_pmc(idx)) {
			cpuhw->limited_counter[n_lim] = counter;
			cpuhw->limited_hwidx[n_lim] = idx;
			++n_lim;
			continue;
		}
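		/*
		 * PMCs interrupt when bit 31 goes from 0 to 1, so start
		 * the counter at 0x80000000 - left to get an interrupt
		 * after 'left' more events.
		 */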
		val = 0;
		if (counter->hw.irq_period) {
			left = atomic64_read(&counter->hw.period_left);
			if (left < 0x80000000L)
				val = 0x80000000L - left;
		}
		atomic64_set(&counter->hw.prev_count, val);
		counter->hw.idx = idx;
		write_pmc(idx, val);
		perf_counter_update_userpage(counter);
	}
	cpuhw->n_limited = n_lim;
	cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE;

 out_enable:
	mb();
	write_mmcr0(cpuhw, cpuhw->mmcr[0]);

	/*
	 * Enable instruction sampling if necessary
	 */
	if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
		mb();
		mtspr(SPRN_MMCRA, cpuhw->mmcr[2]);
	}

 out:
	local_irq_restore(flags);
}

static int collect_events(struct perf_counter *group, int max_count,
			  struct perf_counter *ctrs[], u64 *events,
			  unsigned int *flags)
{
	int n = 0;
	struct perf_counter *counter;
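	/*
	 * hw.counter_base holds the PPMU_* constraint flags that were
	 * worked out in hw_perf_counter_init(), so hand them back out
	 * in flags[] alongside the event codes.
	 */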
	if (!is_software_counter(group)) {
		if (n >= max_count)
			return -1;
		ctrs[n] = group;
		flags[n] = group->hw.counter_base;
		events[n++] = group->hw.config;
	}
	list_for_each_entry(counter, &group->sibling_list, list_entry) {
		if (!is_software_counter(counter) &&
		    counter->state != PERF_COUNTER_STATE_OFF) {
			if (n >= max_count)
				return -1;
			ctrs[n] = counter;
			flags[n] = counter->hw.counter_base;
			events[n++] = counter->hw.config;
		}
	}
	return n;
}

static void counter_sched_in(struct perf_counter *counter, int cpu)
{
	counter->state = PERF_COUNTER_STATE_ACTIVE;
	counter->oncpu = cpu;
	counter->tstamp_running += counter->ctx->time - counter->tstamp_stopped;
	if (is_software_counter(counter))
		counter->pmu->enable(counter);
}

/*
 * Called to enable a whole group of counters.
 * Returns 1 if the group was enabled, or -EAGAIN if it could not be.
 * Assumes the caller has disabled interrupts and has
 * frozen the PMU with hw_perf_save_disable.
 */
int hw_perf_group_sched_in(struct perf_counter *group_leader,
			   struct perf_cpu_context *cpuctx,
			   struct perf_counter_context *ctx, int cpu)
{
	struct cpu_hw_counters *cpuhw;
	long i, n, n0;
	struct perf_counter *sub;

	cpuhw = &__get_cpu_var(cpu_hw_counters);
	n0 = cpuhw->n_counters;
	n = collect_events(group_leader, ppmu->n_counter - n0,
			   &cpuhw->counter[n0], &cpuhw->events[n0],
			   &cpuhw->flags[n0]);
	if (n < 0)
		return -EAGAIN;
	if (check_excludes(cpuhw->counter, cpuhw->flags, n0, n))
		return -EAGAIN;
	i = power_check_constraints(cpuhw->events, cpuhw->flags, n + n0);
	if (i < 0)
		return -EAGAIN;
	cpuhw->n_counters = n0 + n;
	cpuhw->n_added += n;

	/*
	 * OK, this group can go on; update counter states etc.,
	 * and enable any software counters
	 */
	for (i = n0; i < n0 + n; ++i)
		cpuhw->counter[i]->hw.config = cpuhw->events[i];
	cpuctx->active_oncpu += n;
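	/* schedule in the leader and its siblings, counting how many go on */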
	n = 1;
	counter_sched_in(group_leader, cpu);
	list_for_each_entry(sub, &group_leader->sibling_list, list_entry) {
		if (sub->state != PERF_COUNTER_STATE_OFF) {
			counter_sched_in(sub, cpu);
			++n;
		}
	}
	ctx->nr_active += n;
	return 1;
}

/*
 * Add a counter to the PMU.
 * If all counters are not already frozen, then we disable and
 * re-enable the PMU in order to get hw_perf_enable to do the
 * actual work of reconfiguring the PMU.
 */
static int power_pmu_enable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuhw;
	unsigned long flags;
	int n0;
	int ret = -EAGAIN;

	local_irq_save(flags);
	perf_disable();

	/*
	 * Add the counter to the list (if there is room)
	 * and check whether the total set is still feasible.
	 */
	cpuhw = &__get_cpu_var(cpu_hw_counters);
	n0 = cpuhw->n_counters;
	if (n0 >= ppmu->n_counter)
		goto out;
	cpuhw->counter[n0] = counter;
	cpuhw->events[n0] = counter->hw.config;
	cpuhw->flags[n0] = counter->hw.counter_base;
	if (check_excludes(cpuhw->counter, cpuhw->flags, n0, 1))
		goto out;
	if (power_check_constraints(cpuhw->events, cpuhw->flags, n0 + 1))
		goto out;

	counter->hw.config = cpuhw->events[n0];
	++cpuhw->n_counters;
	++cpuhw->n_added;

	ret = 0;
 out:
	perf_enable();
	local_irq_restore(flags);
	return ret;
}

/*
 * Remove a counter from the PMU.
 */
static void power_pmu_disable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuhw;
	long i;
	unsigned long flags;

	local_irq_save(flags);
	perf_disable();

	power_pmu_read(counter);

	cpuhw = &__get_cpu_var(cpu_hw_counters);
	for (i = 0; i < cpuhw->n_counters; ++i) {
		if (counter == cpuhw->counter[i]) {
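			/* shift the remaining counters down to fill the gap */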
			while (++i < cpuhw->n_counters)
				cpuhw->counter[i-1] = cpuhw->counter[i];
			--cpuhw->n_counters;
			ppmu->disable_pmc(counter->hw.idx - 1, cpuhw->mmcr);
			if (counter->hw.idx) {
				write_pmc(counter->hw.idx, 0);
				counter->hw.idx = 0;
			}
			perf_counter_update_userpage(counter);
			break;
		}
	}
	for (i = 0; i < cpuhw->n_limited; ++i)
		if (counter == cpuhw->limited_counter[i])
			break;
	if (i < cpuhw->n_limited) {
		while (++i < cpuhw->n_limited) {
			cpuhw->limited_counter[i-1] = cpuhw->limited_counter[i];
			cpuhw->limited_hwidx[i-1] = cpuhw->limited_hwidx[i];
		}
		--cpuhw->n_limited;
	}
	if (cpuhw->n_counters == 0) {
		/* disable exceptions if no counters are running */
		cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
	}

	perf_enable();
	local_irq_restore(flags);
}

struct pmu power_pmu = {
	.enable		= power_pmu_enable,
	.disable	= power_pmu_disable,
	.read		= power_pmu_read,
};

/*
 * Return 1 if we might be able to put counter on a limited PMC,
 * or 0 if not.
 * A counter can only go on a limited PMC if it counts something
 * that a limited PMC can count, doesn't require interrupts, and
 * doesn't exclude any processor mode.
 */
static int can_go_on_limited_pmc(struct perf_counter *counter, u64 ev,
				 unsigned int flags)
{
	int n;
	u64 alt[MAX_EVENT_ALTERNATIVES];

	if (counter->hw_event.exclude_user
	    || counter->hw_event.exclude_kernel
	    || counter->hw_event.exclude_hv
	    || counter->hw_event.irq_period)
		return 0;

	if (ppmu->limited_pmc_event(ev))
		return 1;

	/*
	 * The requested event isn't on a limited PMC already;
	 * see if any alternative code goes on a limited PMC.
	 */
	if (!ppmu->get_alternatives)
		return 0;

	flags |= PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD;
	n = ppmu->get_alternatives(ev, flags, alt);

	return n > 0;
}

/*
 * Find an alternative event that goes on a normal PMC, if possible,
 * and return the event code, or 0 if there is no such alternative.
 * (Note: event code 0 is "don't count" on all machines.)
 */
static u64 normal_pmc_alternative(u64 ev, unsigned long flags)
{
	u64 alt[MAX_EVENT_ALTERNATIVES];
	int n;

	flags &= ~(PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD);
	n = ppmu->get_alternatives(ev, flags, alt);
	if (!n)
		return 0;
	return alt[0];
}

/* Number of perf_counters counting hardware events */
static atomic_t num_counters;
/* Used to avoid races in calling reserve/release_pmc_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);

/*
 * Release the PMU if this is the last perf_counter.
 */
static void hw_perf_counter_destroy(struct perf_counter *counter)
{
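	/*
	 * atomic_add_unless() fails only when the count is 1, i.e. when
	 * this might be the last counter.  In that case take the mutex so
	 * the final decrement and release_pmc_hardware() can't race with
	 * a concurrent hw_perf_counter_init() reserving the PMU.
	 */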
	if (!atomic_add_unless(&num_counters, -1, 1)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_dec_return(&num_counters) == 0)
			release_pmc_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
{
	u64 ev;
	unsigned long flags;
	struct perf_counter *ctrs[MAX_HWCOUNTERS];
	u64 events[MAX_HWCOUNTERS];
	unsigned int cflags[MAX_HWCOUNTERS];
	int n;
	int err;

	if (!ppmu)
		return ERR_PTR(-ENXIO);
	if (!perf_event_raw(&counter->hw_event)) {
		ev = perf_event_id(&counter->hw_event);
		if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
			return ERR_PTR(-EOPNOTSUPP);
		ev = ppmu->generic_events[ev];
	} else {
		ev = perf_event_config(&counter->hw_event);
	}
	counter->hw.config_base = ev;
	counter->hw.idx = 0;

	/*
	 * If we are not running on a hypervisor, force the
	 * exclude_hv bit to 0 so that we don't care what
	 * the user set it to.
	 */
	if (!firmware_has_feature(FW_FEATURE_LPAR))
		counter->hw_event.exclude_hv = 0;

	/*
	 * If this is a per-task counter, then we can use
	 * PM_RUN_* events interchangeably with their non RUN_*
	 * equivalents, e.g. PM_RUN_CYC instead of PM_CYC.
	 * XXX we should check if the task is an idle task.
	 */
	flags = 0;
	if (counter->ctx->task)
		flags |= PPMU_ONLY_COUNT_RUN;

	/*
	 * If this machine has limited counters, check whether this
	 * event could go on a limited counter.
	 */
	if (ppmu->flags & PPMU_LIMITED_PMC5_6) {
		if (can_go_on_limited_pmc(counter, ev, flags)) {
			flags |= PPMU_LIMITED_PMC_OK;
		} else if (ppmu->limited_pmc_event(ev)) {
			/*
			 * The requested event is on a limited PMC,
			 * but we can't use a limited PMC; see if any
			 * alternative goes on a normal PMC.
			 */
			ev = normal_pmc_alternative(ev, flags);
			if (!ev)
				return ERR_PTR(-EINVAL);
		}
	}

	/*
	 * If this is in a group, check if it can go on with all the
	 * other hardware counters in the group.  We assume the counter
	 * hasn't been linked into its leader's sibling list at this point.
	 */
	n = 0;
	if (counter->group_leader != counter) {
		n = collect_events(counter->group_leader, ppmu->n_counter - 1,
				   ctrs, events, cflags);
		if (n < 0)
			return ERR_PTR(-EINVAL);
	}
	events[n] = ev;
	ctrs[n] = counter;
	cflags[n] = flags;
	if (check_excludes(ctrs, cflags, n, 1))
		return ERR_PTR(-EINVAL);
	if (power_check_constraints(events, cflags, n + 1))
		return ERR_PTR(-EINVAL);

	counter->hw.config = events[n];
	counter->hw.counter_base = cflags[n];
	atomic64_set(&counter->hw.period_left, counter->hw.irq_period);

	/*
	 * See if we need to reserve the PMU.
	 * If no counters are currently in use, then we have to take a
	 * mutex to ensure that we don't race with another task doing
	 * reserve_pmc_hardware or release_pmc_hardware.
	 */
	err = 0;
	if (!atomic_inc_not_zero(&num_counters)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&num_counters) == 0 &&
		    reserve_pmc_hardware(perf_counter_interrupt))
			err = -EBUSY;
		else
			atomic_inc(&num_counters);
		mutex_unlock(&pmc_reserve_mutex);
	}
	counter->destroy = hw_perf_counter_destroy;

	if (err)
		return ERR_PTR(err);
	return &power_pmu;
}

/*
 * A counter has overflowed; update its count and record
 * things if requested.  Note that interrupts are hard-disabled
 * here so there is no possibility of being interrupted.
 */
static void record_and_restart(struct perf_counter *counter, long val,
			       struct pt_regs *regs, int nmi)
{
	u64 period = counter->hw.irq_period;
	s64 prev, delta, left;
	int record = 0;
	u64 addr, mmcra, sdsync;

	/* we don't have to worry about interrupts here */
	prev = atomic64_read(&counter->hw.prev_count);
	delta = (val - prev) & 0xfffffffful;
	atomic64_add(delta, &counter->count);

	/*
	 * See if the total period for this counter has expired,
	 * and update for the next period.
	 */
	val = 0;
	left = atomic64_read(&counter->hw.period_left) - delta;
	if (period) {
		if (left <= 0) {
			left += period;
			if (left <= 0)
				left = period;
			record = 1;
		}
		if (left < 0x80000000L)
			val = 0x80000000L - left;
	}
	write_pmc(counter->hw.idx, val);
	atomic64_set(&counter->hw.prev_count, val);
	atomic64_set(&counter->hw.period_left, left);
	perf_counter_update_userpage(counter);

	/*
	 * Finally record data if requested.
	 */
	if (record) {
		addr = 0;
		if (counter->hw_event.record_type & PERF_RECORD_ADDR) {
			/*
			 * The user wants a data address recorded.
			 * If we're not doing instruction sampling,
			 * give them the SDAR (sampled data address).
			 * If we are doing instruction sampling, then only
			 * give them the SDAR if it corresponds to the
			 * instruction pointed to by SIAR; this is indicated
			 * by the [POWER6_]MMCRA_SDSYNC bit in MMCRA.
			 */
			mmcra = regs->dsisr;
			sdsync = (ppmu->flags & PPMU_ALT_SIPR) ?
				POWER6_MMCRA_SDSYNC : MMCRA_SDSYNC;
			if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
				addr = mfspr(SPRN_SDAR);
		}
		perf_counter_overflow(counter, nmi, regs, addr);
	}
}

/*
 * Called from generic code to get the misc flags (i.e. processor mode)
 * for an event.
 */
unsigned long perf_misc_flags(struct pt_regs *regs)
{
	unsigned long mmcra;

	if (TRAP(regs) != 0xf00) {
		/* not a PMU interrupt */
		return user_mode(regs) ? PERF_EVENT_MISC_USER :
			PERF_EVENT_MISC_KERNEL;
	}

	mmcra = regs->dsisr;
	if (ppmu->flags & PPMU_ALT_SIPR) {
		if (mmcra & POWER6_MMCRA_SIHV)
			return PERF_EVENT_MISC_HYPERVISOR;
		return (mmcra & POWER6_MMCRA_SIPR) ? PERF_EVENT_MISC_USER :
			PERF_EVENT_MISC_KERNEL;
	}
	if (mmcra & MMCRA_SIHV)
		return PERF_EVENT_MISC_HYPERVISOR;
	return (mmcra & MMCRA_SIPR) ? PERF_EVENT_MISC_USER :
		PERF_EVENT_MISC_KERNEL;
}

/*
 * Called from generic code to get the instruction pointer
 * for an event.
 */
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
	unsigned long mmcra;
	unsigned long ip;
	unsigned long slot;

	if (TRAP(regs) != 0xf00)
		return regs->nip;	/* not a PMU interrupt */

	ip = mfspr(SPRN_SIAR);
	mmcra = regs->dsisr;
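	/*
	 * With instruction sampling, SIAR points at the start of the
	 * sampled instruction group; the MMCRA slot field identifies
	 * which 4-byte instruction within the group was sampled.
	 */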
	if ((mmcra & MMCRA_SAMPLE_ENABLE) && !(ppmu->flags & PPMU_ALT_SIPR)) {
		slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT;
		if (slot > 1)
			ip += 4 * (slot - 1);
	}
	return ip;
}

/*
 * Performance monitor interrupt stuff
 */
static void perf_counter_interrupt(struct pt_regs *regs)
{
	int i;
	struct cpu_hw_counters *cpuhw = &__get_cpu_var(cpu_hw_counters);
	struct perf_counter *counter;
	long val;
	int found = 0;
	int nmi;

	if (cpuhw->n_limited)
		freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5),
					mfspr(SPRN_PMC6));

	/*
	 * Overload regs->dsisr to store MMCRA so we only need to read it once.
	 */
	regs->dsisr = mfspr(SPRN_MMCRA);

	/*
	 * If interrupts were soft-disabled when this PMU interrupt
	 * occurred, treat it as an NMI.
	 */
	nmi = !regs->softe;
	if (nmi)
		nmi_enter();
	else
		irq_enter();

	for (i = 0; i < cpuhw->n_counters; ++i) {
		counter = cpuhw->counter[i];
		if (is_limited_pmc(counter->hw.idx))
			continue;
		val = read_pmc(counter->hw.idx);
		if ((int)val < 0) {
			/* counter has overflowed */
			found = 1;
			record_and_restart(counter, val, regs, nmi);
		}
	}

	/*
	 * In case we didn't find and reset the counter that caused
	 * the interrupt, scan all counters and reset any that are
	 * negative, to avoid getting continual interrupts.
	 * Any that we processed in the previous loop will not be negative.
	 */
	if (!found) {
		for (i = 0; i < ppmu->n_counter; ++i) {
			if (is_limited_pmc(i + 1))
				continue;
			val = read_pmc(i + 1);
			if ((int)val < 0)
				write_pmc(i + 1, 0);
		}
	}

	/*
	 * Reset MMCR0 to its normal value.  This will set PMXE and
	 * clear FC (freeze counters) and PMAO (perf mon alert occurred)
	 * and thus allow interrupts to occur again.
	 * XXX might want to use MSR.PM to keep the counters frozen until
	 * we get back out of this interrupt.
	 */
	write_mmcr0(cpuhw, cpuhw->mmcr[0]);

	if (nmi)
		nmi_exit();
	else
		irq_exit();
}

void hw_perf_counter_setup(int cpu)
{
	struct cpu_hw_counters *cpuhw = &per_cpu(cpu_hw_counters, cpu);

	memset(cpuhw, 0, sizeof(*cpuhw));
	cpuhw->mmcr[0] = MMCR0_FC;
}

extern struct power_pmu power4_pmu;
extern struct power_pmu ppc970_pmu;
extern struct power_pmu power5_pmu;
extern struct power_pmu power5p_pmu;
extern struct power_pmu power6_pmu;

static int init_perf_counters(void)
{
	unsigned long pvr;

	/* XXX should get this from cputable */
	pvr = mfspr(SPRN_PVR);
	switch (PVR_VER(pvr)) {
	case PV_POWER4:
	case PV_POWER4p:
		ppmu = &power4_pmu;
		break;
	case PV_970:
	case PV_970FX:
	case PV_970MP:
		ppmu = &ppc970_pmu;
		break;
	case PV_POWER5:
		ppmu = &power5_pmu;
		break;
	case PV_POWER5p:
		ppmu = &power5p_pmu;
		break;
	case 0x3e:
		ppmu = &power6_pmu;
		break;
	}

	/*
	 * Use FCHV to ignore kernel events if MSR.HV is set.
	 */
	if (mfmsr() & MSR_HV)
		freeze_counters_kernel = MMCR0_FCHV;

	return 0;
}

arch_initcall(init_perf_counters);