/*
 * Performance counter support - powerpc architecture code
 *
 * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_counter.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/reg.h>
#include <asm/pmc.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>

struct cpu_hw_counters {
	int n_counters;
	int n_percpu;
	int disabled;
	int n_added;
	int n_limited;
	u8  pmcs_enabled;
	struct perf_counter *counter[MAX_HWCOUNTERS];
	u64 events[MAX_HWCOUNTERS];
	unsigned int flags[MAX_HWCOUNTERS];
	unsigned long mmcr[3];
	struct perf_counter *limited_counter[MAX_LIMITED_HWCOUNTERS];
	u8  limited_hwidx[MAX_LIMITED_HWCOUNTERS];
};
DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters);

struct power_pmu *ppmu;

/*
 * Normally, to ignore kernel events we set the FCS (freeze counters
 * in supervisor mode) bit in MMCR0, but if the kernel runs with the
 * hypervisor bit set in the MSR, or if we are running on a processor
 * where the hypervisor bit is forced to 1 (as on Apple G5 processors),
 * then we need to use the FCHV bit to ignore kernel events.
 */
static unsigned int freeze_counters_kernel = MMCR0_FCS;

static void perf_counter_interrupt(struct pt_regs *regs);

void perf_counter_print_debug(void)
{
}

/*
 * Read one performance monitor counter (PMC).
 */
static unsigned long read_pmc(int idx)
{
	unsigned long val;

	switch (idx) {
	case 1:
		val = mfspr(SPRN_PMC1);
		break;
	case 2:
		val = mfspr(SPRN_PMC2);
		break;
	case 3:
		val = mfspr(SPRN_PMC3);
		break;
	case 4:
		val = mfspr(SPRN_PMC4);
		break;
	case 5:
		val = mfspr(SPRN_PMC5);
		break;
	case 6:
		val = mfspr(SPRN_PMC6);
		break;
	case 7:
		val = mfspr(SPRN_PMC7);
		break;
	case 8:
		val = mfspr(SPRN_PMC8);
		break;
	default:
		printk(KERN_ERR "oops trying to read PMC%d\n", idx);
		val = 0;
	}
	return val;
}

/*
 * Write one PMC.
 */
static void write_pmc(int idx, unsigned long val)
{
	switch (idx) {
	case 1:
		mtspr(SPRN_PMC1, val);
		break;
	case 2:
		mtspr(SPRN_PMC2, val);
		break;
	case 3:
		mtspr(SPRN_PMC3, val);
		break;
	case 4:
		mtspr(SPRN_PMC4, val);
		break;
	case 5:
		mtspr(SPRN_PMC5, val);
		break;
	case 6:
		mtspr(SPRN_PMC6, val);
		break;
	case 7:
		mtspr(SPRN_PMC7, val);
		break;
	case 8:
		mtspr(SPRN_PMC8, val);
		break;
	default:
		printk(KERN_ERR "oops trying to write PMC%d\n", idx);
	}
}

/*
 * Check if a set of events can all go on the PMU at once.
 * If they can't, this will look at alternative codes for the events
 * and see if any combination of alternative codes is feasible.
 * The feasible set is returned in event[].
 */
static int power_check_constraints(u64 event[], unsigned int cflags[],
				   int n_ev)
{
	unsigned long mask, value, nv;
	u64 alternatives[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
	unsigned long amasks[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
	unsigned long avalues[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
	unsigned long smasks[MAX_HWCOUNTERS], svalues[MAX_HWCOUNTERS];
	int n_alt[MAX_HWCOUNTERS], choice[MAX_HWCOUNTERS];
	int i, j;
	unsigned long addf = ppmu->add_fields;
	unsigned long tadd = ppmu->test_adder;

	if (n_ev > ppmu->n_counter)
		return -1;

	/* First see if the events will go on as-is */
	for (i = 0; i < n_ev; ++i) {
		if ((cflags[i] & PPMU_LIMITED_PMC_REQD)
		    && !ppmu->limited_pmc_event(event[i])) {
			ppmu->get_alternatives(event[i], cflags[i],
					       alternatives[i]);
			event[i] = alternatives[i][0];
		}
		if (ppmu->get_constraint(event[i], &amasks[i][0],
					 &avalues[i][0]))
			return -1;
	}
	value = mask = 0;
	for (i = 0; i < n_ev; ++i) {
		nv = (value | avalues[i][0]) + (value & avalues[i][0] & addf);
		if ((((nv + tadd) ^ value) & mask) != 0 ||
		    (((nv + tadd) ^ avalues[i][0]) & amasks[i][0]) != 0)
			break;
		value = nv;
		mask |= amasks[i][0];
	}
	if (i == n_ev)
		return 0;	/* all OK */

	/* doesn't work, gather alternatives... */
	if (!ppmu->get_alternatives)
		return -1;
	for (i = 0; i < n_ev; ++i) {
		choice[i] = 0;
		n_alt[i] = ppmu->get_alternatives(event[i], cflags[i],
						  alternatives[i]);
		for (j = 1; j < n_alt[i]; ++j)
			ppmu->get_constraint(alternatives[i][j],
					     &amasks[i][j], &avalues[i][j]);
	}

	/* enumerate all possibilities and see if any will work */
	i = 0;
	j = -1;
	value = mask = nv = 0;
	while (i < n_ev) {
		if (j >= 0) {
			/* we're backtracking, restore context */
			value = svalues[i];
			mask = smasks[i];
			j = choice[i];
		}
		/*
		 * See if any alternative k for event i,
		 * where k > j, will satisfy the constraints.
		 */
		while (++j < n_alt[i]) {
			nv = (value | avalues[i][j]) +
				(value & avalues[i][j] & addf);
			if ((((nv + tadd) ^ value) & mask) == 0 &&
			    (((nv + tadd) ^ avalues[i][j])
			     & amasks[i][j]) == 0)
				break;
		}
		if (j >= n_alt[i]) {
			/*
			 * No feasible alternative, backtrack
			 * to event i-1 and continue enumerating its
			 * alternatives from where we got up to.
			 */
			if (--i < 0)
				return -1;
		} else {
			/*
			 * Found a feasible alternative for event i,
			 * remember where we got up to with this event,
			 * go on to the next event, and start with
			 * the first alternative for it.
			 */
			choice[i] = j;
			svalues[i] = value;
			smasks[i] = mask;
			value = nv;
			mask |= amasks[i][j];
			++i;
			j = -1;
		}
	}

	/* OK, we have a feasible combination, tell the caller the solution */
	for (i = 0; i < n_ev; ++i)
		event[i] = alternatives[i][choice[i]];
	return 0;
}
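
/*
 * An illustration (not taken from any real PMU table) of how the
 * mask/value/add_fields/test_adder arithmetic above behaves: suppose
 * bits 0-2 of the constraint value form an adder field counting events
 * that need one of four interchangeable PMCs, with bit 3 acting as the
 * "too many" indicator.  Each such event would supply avalue = 1 and
 * amask = 0x8, add_fields would have bit 0 set so that
 * (value | avalue) + (value & avalue & addf) sums the per-event
 * contributions, and test_adder would hold 3 so that a fifth event
 * (field value 5) carries into bit 3 when tadd is added, which the
 * masked-XOR tests then reject.  The real encodings for each CPU come
 * from the ppmu tables in the power*-pmu.c files.
 */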

/*
 * Check if newly-added counters have consistent settings for
 * exclude_{user,kernel,hv} with each other and any previously
 * added counters.
 */
static int check_excludes(struct perf_counter **ctrs, unsigned int cflags[],
			  int n_prev, int n_new)
{
	int eu = 0, ek = 0, eh = 0;
	int i, n, first;
	struct perf_counter *counter;

	n = n_prev + n_new;
	if (n <= 1)
		return 0;

	first = 1;
	for (i = 0; i < n; ++i) {
		if (cflags[i] & PPMU_LIMITED_PMC_OK) {
			cflags[i] &= ~PPMU_LIMITED_PMC_REQD;
			continue;
		}
		counter = ctrs[i];
		if (first) {
			eu = counter->attr.exclude_user;
			ek = counter->attr.exclude_kernel;
			eh = counter->attr.exclude_hv;
			first = 0;
		} else if (counter->attr.exclude_user != eu ||
			   counter->attr.exclude_kernel != ek ||
			   counter->attr.exclude_hv != eh) {
			return -EAGAIN;
		}
	}

	if (eu || ek || eh)
		for (i = 0; i < n; ++i)
			if (cflags[i] & PPMU_LIMITED_PMC_OK)
				cflags[i] |= PPMU_LIMITED_PMC_REQD;

	return 0;
}

static void power_pmu_read(struct perf_counter *counter)
{
	long val, delta, prev;

	if (!counter->hw.idx)
		return;
	/*
	 * Performance monitor interrupts come even when interrupts
	 * are soft-disabled, as long as interrupts are hard-enabled.
	 * Therefore we treat them like NMIs.
	 */
	do {
		prev = atomic64_read(&counter->hw.prev_count);
		barrier();
		val = read_pmc(counter->hw.idx);
	} while (atomic64_cmpxchg(&counter->hw.prev_count, prev, val) != prev);

	/* The counters are only 32 bits wide */
	delta = (val - prev) & 0xfffffffful;
	atomic64_add(delta, &counter->count);
	atomic64_sub(delta, &counter->hw.period_left);
}

/*
 * On some machines, PMC5 and PMC6 can't be written, don't respect
 * the freeze conditions, and don't generate interrupts.  This tells
 * us if `counter' is using such a PMC.
 */
static int is_limited_pmc(int pmcnum)
{
	return (ppmu->flags & PPMU_LIMITED_PMC5_6)
		&& (pmcnum == 5 || pmcnum == 6);
}

static void freeze_limited_counters(struct cpu_hw_counters *cpuhw,
				    unsigned long pmc5, unsigned long pmc6)
{
	struct perf_counter *counter;
	u64 val, prev, delta;
	int i;

	for (i = 0; i < cpuhw->n_limited; ++i) {
		counter = cpuhw->limited_counter[i];
		if (!counter->hw.idx)
			continue;
		val = (counter->hw.idx == 5) ? pmc5 : pmc6;
		prev = atomic64_read(&counter->hw.prev_count);
		counter->hw.idx = 0;
		delta = (val - prev) & 0xfffffffful;
		atomic64_add(delta, &counter->count);
	}
}

static void thaw_limited_counters(struct cpu_hw_counters *cpuhw,
				  unsigned long pmc5, unsigned long pmc6)
{
	struct perf_counter *counter;
	u64 val;
	int i;

	for (i = 0; i < cpuhw->n_limited; ++i) {
		counter = cpuhw->limited_counter[i];
		counter->hw.idx = cpuhw->limited_hwidx[i];
		val = (counter->hw.idx == 5) ? pmc5 : pmc6;
		atomic64_set(&counter->hw.prev_count, val);
		perf_counter_update_userpage(counter);
	}
}

/*
 * Since limited counters don't respect the freeze conditions, we
 * have to read them immediately after freezing or unfreezing the
 * other counters.  We try to keep the values from the limited
 * counters as consistent as possible by keeping the delay (in
 * cycles and instructions) between freezing/unfreezing and reading
 * the limited counters as small and consistent as possible.
 * Therefore, if any limited counters are in use, we read them
 * both, and always in the same order, to minimize variability,
 * and do it inside the same asm that writes MMCR0.
 */
static void write_mmcr0(struct cpu_hw_counters *cpuhw, unsigned long mmcr0)
{
	unsigned long pmc5, pmc6;

	if (!cpuhw->n_limited) {
		mtspr(SPRN_MMCR0, mmcr0);
		return;
	}

	/*
	 * Write MMCR0, then read PMC5 and PMC6 immediately.
	 * To ensure we don't get a performance monitor interrupt
	 * between writing MMCR0 and freezing/thawing the limited
	 * counters, we first write MMCR0 with the counter overflow
	 * interrupt enable bits turned off.
	 */
	asm volatile("mtspr %3,%2; mfspr %0,%4; mfspr %1,%5"
		     : "=&r" (pmc5), "=&r" (pmc6)
		     : "r" (mmcr0 & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)),
		       "i" (SPRN_MMCR0),
		       "i" (SPRN_PMC5), "i" (SPRN_PMC6));

	if (mmcr0 & MMCR0_FC)
		freeze_limited_counters(cpuhw, pmc5, pmc6);
	else
		thaw_limited_counters(cpuhw, pmc5, pmc6);

	/*
	 * Write the full MMCR0 including the counter overflow interrupt
	 * enable bits, if necessary.
	 */
	if (mmcr0 & (MMCR0_PMC1CE | MMCR0_PMCjCE))
		mtspr(SPRN_MMCR0, mmcr0);
}

/*
 * Disable all counters to prevent PMU interrupts and to allow
 * counters to be added or removed.
 */
void hw_perf_disable(void)
{
	struct cpu_hw_counters *cpuhw;
	unsigned long flags;

	local_irq_save(flags);
	cpuhw = &__get_cpu_var(cpu_hw_counters);

	if (!cpuhw->disabled) {
		cpuhw->disabled = 1;
		cpuhw->n_added = 0;

		/*
		 * Check if we ever enabled the PMU on this cpu.
		 */
		if (!cpuhw->pmcs_enabled) {
			if (ppc_md.enable_pmcs)
				ppc_md.enable_pmcs();
			cpuhw->pmcs_enabled = 1;
		}

		/*
		 * Disable instruction sampling if it was enabled
		 */
		if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
			mtspr(SPRN_MMCRA,
			      cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
			mb();
		}

		/*
		 * Set the 'freeze counters' bit.
		 * The barrier is to make sure the mtspr has been
		 * executed and the PMU has frozen the counters
		 * before we return.
		 */
		write_mmcr0(cpuhw, mfspr(SPRN_MMCR0) | MMCR0_FC);
		mb();
	}
	local_irq_restore(flags);
}

/*
 * Re-enable all counters.
 * If we were previously disabled and counters were added, then
 * put the new config on the PMU.
 */
void hw_perf_enable(void)
{
	struct perf_counter *counter;
	struct cpu_hw_counters *cpuhw;
	unsigned long flags;
	long i;
	unsigned long val;
	s64 left;
	unsigned int hwc_index[MAX_HWCOUNTERS];
	int n_lim;
	int idx;

	local_irq_save(flags);
	cpuhw = &__get_cpu_var(cpu_hw_counters);
	if (!cpuhw->disabled) {
		local_irq_restore(flags);
		return;
	}
	cpuhw->disabled = 0;

	/*
	 * If we didn't change anything, or only removed counters,
	 * no need to recalculate MMCR* settings and reset the PMCs.
	 * Just reenable the PMU with the current MMCR* settings
	 * (possibly updated for removal of counters).
	 */
	if (!cpuhw->n_added) {
		mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
		mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
		if (cpuhw->n_counters == 0)
			get_lppaca()->pmcregs_in_use = 0;
		goto out_enable;
	}

	/*
	 * Compute MMCR* values for the new set of counters
	 */
	if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_counters, hwc_index,
			       cpuhw->mmcr)) {
		/* shouldn't ever get here */
		printk(KERN_ERR "oops compute_mmcr failed\n");
		goto out;
	}

	/*
	 * Add in MMCR0 freeze bits corresponding to the
	 * attr.exclude_* bits for the first counter.
	 * We have already checked that all counters have the
	 * same values for these bits as the first counter.
	 */
	counter = cpuhw->counter[0];
	if (counter->attr.exclude_user)
		cpuhw->mmcr[0] |= MMCR0_FCP;
	if (counter->attr.exclude_kernel)
		cpuhw->mmcr[0] |= freeze_counters_kernel;
	if (counter->attr.exclude_hv)
		cpuhw->mmcr[0] |= MMCR0_FCHV;

	/*
	 * Write the new configuration to MMCR* with the freeze
	 * bit set and set the hardware counters to their initial values.
	 * Then unfreeze the counters.
	 */
	get_lppaca()->pmcregs_in_use = 1;
	mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
	mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
	mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
				| MMCR0_FC);

	/*
	 * Read off any pre-existing counters that need to move
	 * to another PMC.
	 */
	for (i = 0; i < cpuhw->n_counters; ++i) {
		counter = cpuhw->counter[i];
		if (counter->hw.idx && counter->hw.idx != hwc_index[i] + 1) {
			power_pmu_read(counter);
			write_pmc(counter->hw.idx, 0);
			counter->hw.idx = 0;
		}
	}

	/*
	 * Initialize the PMCs for all the new and moved counters.
	 */
	cpuhw->n_limited = n_lim = 0;
	for (i = 0; i < cpuhw->n_counters; ++i) {
		counter = cpuhw->counter[i];
		if (counter->hw.idx)
			continue;
		idx = hwc_index[i] + 1;
		if (is_limited_pmc(idx)) {
			cpuhw->limited_counter[n_lim] = counter;
			cpuhw->limited_hwidx[n_lim] = idx;
			++n_lim;
			continue;
		}
		val = 0;
		if (counter->hw.sample_period) {
			left = atomic64_read(&counter->hw.period_left);
			if (left < 0x80000000L)
				val = 0x80000000L - left;
		}
		atomic64_set(&counter->hw.prev_count, val);
		counter->hw.idx = idx;
		write_pmc(idx, val);
		perf_counter_update_userpage(counter);
	}
	cpuhw->n_limited = n_lim;
	cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE;

 out_enable:
	mb();
	write_mmcr0(cpuhw, cpuhw->mmcr[0]);

	/*
	 * Enable instruction sampling if necessary
	 */
	if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
		mb();
		mtspr(SPRN_MMCRA, cpuhw->mmcr[2]);
	}

 out:
	local_irq_restore(flags);
}
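
/*
 * A note on the 0x80000000 - left arithmetic used above (and again in
 * power_pmu_unthrottle() and record_and_restart() below): the PMCs are
 * 32-bit registers and raise a performance monitor exception when bit 31
 * of a counter becomes set, i.e. when its value goes "negative", which is
 * also how the interrupt handler below recognizes an overflowed counter.
 * Priming a counter with 0x80000000 - left therefore makes it overflow
 * after `left' more events, i.e. after one sampling period.
 */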

static int collect_events(struct perf_counter *group, int max_count,
			  struct perf_counter *ctrs[], u64 *events,
			  unsigned int *flags)
{
	int n = 0;
	struct perf_counter *counter;

	if (!is_software_counter(group)) {
		if (n >= max_count)
			return -1;
		ctrs[n] = group;
		flags[n] = group->hw.counter_base;
		events[n++] = group->hw.config;
	}
	list_for_each_entry(counter, &group->sibling_list, list_entry) {
		if (!is_software_counter(counter) &&
		    counter->state != PERF_COUNTER_STATE_OFF) {
			if (n >= max_count)
				return -1;
			ctrs[n] = counter;
			flags[n] = counter->hw.counter_base;
			events[n++] = counter->hw.config;
		}
	}
	return n;
}

static void counter_sched_in(struct perf_counter *counter, int cpu)
{
	counter->state = PERF_COUNTER_STATE_ACTIVE;
	counter->oncpu = cpu;
	counter->tstamp_running += counter->ctx->time - counter->tstamp_stopped;
	if (is_software_counter(counter))
		counter->pmu->enable(counter);
}

/*
 * Called to enable a whole group of counters.
 * Returns 1 if the group was enabled, or -EAGAIN if it could not be.
 * Assumes the caller has disabled interrupts and has
 * frozen the PMU with hw_perf_disable.
 */
int hw_perf_group_sched_in(struct perf_counter *group_leader,
			   struct perf_cpu_context *cpuctx,
			   struct perf_counter_context *ctx, int cpu)
{
	struct cpu_hw_counters *cpuhw;
	long i, n, n0;
	struct perf_counter *sub;

	cpuhw = &__get_cpu_var(cpu_hw_counters);
	n0 = cpuhw->n_counters;
	n = collect_events(group_leader, ppmu->n_counter - n0,
			   &cpuhw->counter[n0], &cpuhw->events[n0],
			   &cpuhw->flags[n0]);
	if (n < 0)
		return -EAGAIN;
	if (check_excludes(cpuhw->counter, cpuhw->flags, n0, n))
		return -EAGAIN;
	i = power_check_constraints(cpuhw->events, cpuhw->flags, n + n0);
	if (i < 0)
		return -EAGAIN;
	cpuhw->n_counters = n0 + n;
	cpuhw->n_added += n;

	/*
	 * OK, this group can go on; update counter states etc.,
	 * and enable any software counters
	 */
	for (i = n0; i < n0 + n; ++i)
		cpuhw->counter[i]->hw.config = cpuhw->events[i];
	cpuctx->active_oncpu += n;
	n = 1;
	counter_sched_in(group_leader, cpu);
	list_for_each_entry(sub, &group_leader->sibling_list, list_entry) {
		if (sub->state != PERF_COUNTER_STATE_OFF) {
			counter_sched_in(sub, cpu);
			++n;
		}
	}
	ctx->nr_active += n;

	return 1;
}

/*
 * Add a counter to the PMU.
 * If all counters are not already frozen, then we disable and
 * re-enable the PMU in order to get hw_perf_enable to do the
 * actual work of reconfiguring the PMU.
 */
static int power_pmu_enable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuhw;
	unsigned long flags;
	int n0;
	int ret = -EAGAIN;

	local_irq_save(flags);
	perf_disable();

	/*
	 * Add the counter to the list (if there is room)
	 * and check whether the total set is still feasible.
	 */
	cpuhw = &__get_cpu_var(cpu_hw_counters);
	n0 = cpuhw->n_counters;
	if (n0 >= ppmu->n_counter)
		goto out;
	cpuhw->counter[n0] = counter;
	cpuhw->events[n0] = counter->hw.config;
	cpuhw->flags[n0] = counter->hw.counter_base;
	if (check_excludes(cpuhw->counter, cpuhw->flags, n0, 1))
		goto out;
	if (power_check_constraints(cpuhw->events, cpuhw->flags, n0 + 1))
		goto out;

	counter->hw.config = cpuhw->events[n0];
	++cpuhw->n_counters;
	++cpuhw->n_added;

	ret = 0;
 out:
	perf_enable();
	local_irq_restore(flags);
	return ret;
}

/*
 * Remove a counter from the PMU.
 */
static void power_pmu_disable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuhw;
	long i;
	unsigned long flags;

	local_irq_save(flags);
	perf_disable();

	power_pmu_read(counter);

	cpuhw = &__get_cpu_var(cpu_hw_counters);
	for (i = 0; i < cpuhw->n_counters; ++i) {
		if (counter == cpuhw->counter[i]) {
			while (++i < cpuhw->n_counters)
				cpuhw->counter[i-1] = cpuhw->counter[i];
			--cpuhw->n_counters;
			ppmu->disable_pmc(counter->hw.idx - 1, cpuhw->mmcr);
			if (counter->hw.idx) {
				write_pmc(counter->hw.idx, 0);
				counter->hw.idx = 0;
			}
			perf_counter_update_userpage(counter);
			break;
		}
	}
	for (i = 0; i < cpuhw->n_limited; ++i)
		if (counter == cpuhw->limited_counter[i])
			break;
	if (i < cpuhw->n_limited) {
		while (++i < cpuhw->n_limited) {
			cpuhw->limited_counter[i-1] = cpuhw->limited_counter[i];
			cpuhw->limited_hwidx[i-1] = cpuhw->limited_hwidx[i];
		}
		--cpuhw->n_limited;
	}
	if (cpuhw->n_counters == 0) {
		/* disable exceptions if no counters are running */
		cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
	}

	perf_enable();
	local_irq_restore(flags);
}

/*
 * Re-enable interrupts on a counter after they were throttled
 * because they were coming too fast.
 */
static void power_pmu_unthrottle(struct perf_counter *counter)
{
	s64 val, left;
	unsigned long flags;

	if (!counter->hw.idx || !counter->hw.sample_period)
		return;
	local_irq_save(flags);
	perf_disable();
	power_pmu_read(counter);
	left = counter->hw.sample_period;
	counter->hw.last_period = left;
	val = 0;
	if (left < 0x80000000L)
		val = 0x80000000L - left;
	write_pmc(counter->hw.idx, val);
	atomic64_set(&counter->hw.prev_count, val);
	atomic64_set(&counter->hw.period_left, left);
	perf_counter_update_userpage(counter);
	perf_enable();
	local_irq_restore(flags);
}

struct pmu power_pmu = {
	.enable		= power_pmu_enable,
	.disable	= power_pmu_disable,
	.read		= power_pmu_read,
	.unthrottle	= power_pmu_unthrottle,
};

/*
 * Return 1 if we might be able to put counter on a limited PMC,
 * or 0 if not.
 * A counter can only go on a limited PMC if it counts something
 * that a limited PMC can count, doesn't require interrupts, and
 * doesn't exclude any processor mode.
 */
static int can_go_on_limited_pmc(struct perf_counter *counter, u64 ev,
				 unsigned int flags)
{
	int n;
	u64 alt[MAX_EVENT_ALTERNATIVES];

	if (counter->attr.exclude_user
	    || counter->attr.exclude_kernel
	    || counter->attr.exclude_hv
	    || counter->attr.sample_period)
		return 0;

	if (ppmu->limited_pmc_event(ev))
		return 1;

	/*
	 * The requested event isn't on a limited PMC already;
	 * see if any alternative code goes on a limited PMC.
	 */
	if (!ppmu->get_alternatives)
		return 0;

	flags |= PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD;
	n = ppmu->get_alternatives(ev, flags, alt);

	return n > 0;
}

/*
 * Find an alternative event that goes on a normal PMC, if possible,
 * and return the event code, or 0 if there is no such alternative.
 * (Note: event code 0 is "don't count" on all machines.)
 */
static u64 normal_pmc_alternative(u64 ev, unsigned long flags)
{
	u64 alt[MAX_EVENT_ALTERNATIVES];
	int n;

	flags &= ~(PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD);
	n = ppmu->get_alternatives(ev, flags, alt);
	if (!n)
		return 0;
	return alt[0];
}

/* Number of perf_counters counting hardware events */
static atomic_t num_counters;
/* Used to avoid races in calling reserve/release_pmc_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);

/*
 * Release the PMU if this is the last perf_counter.
 */
static void hw_perf_counter_destroy(struct perf_counter *counter)
{
	if (!atomic_add_unless(&num_counters, -1, 1)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_dec_return(&num_counters) == 0)
			release_pmc_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

/*
 * Translate a generic cache event config to a raw event code.
 */
static int hw_perf_cache_event(u64 config, u64 *eventp)
{
	unsigned long type, op, result;
	int ev;

	if (!ppmu->cache_events)
		return -EINVAL;

	/* unpack config */
	type = config & 0xff;
	op = (config >> 8) & 0xff;
	result = (config >> 16) & 0xff;

	if (type >= PERF_COUNT_HW_CACHE_MAX ||
	    op >= PERF_COUNT_HW_CACHE_OP_MAX ||
	    result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ev = (*ppmu->cache_events)[type][op][result];
	if (ev == 0)
		return -EOPNOTSUPP;
	if (ev == -1)
		return -EINVAL;
	*eventp = ev;
	return 0;
}
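
/*
 * For reference, the generic cache config unpacked above is packed by
 * the perf ABI as (cache_id) | (op_id << 8) | (result_id << 16), so an
 * L1 data-cache read-miss counter, for example, would be requested with
 *
 *	attr.type   = PERF_TYPE_HW_CACHE;
 *	attr.config = PERF_COUNT_HW_CACHE_L1D |
 *		      (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *		      (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 *
 * and mapped to a raw event code via the ppmu->cache_events table.
 */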

const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
{
	u64 ev;
	unsigned long flags;
	struct perf_counter *ctrs[MAX_HWCOUNTERS];
	u64 events[MAX_HWCOUNTERS];
	unsigned int cflags[MAX_HWCOUNTERS];
	int n;
	int err;

	if (!ppmu)
		return ERR_PTR(-ENXIO);
	switch (counter->attr.type) {
	case PERF_TYPE_HARDWARE:
		ev = counter->attr.config;
		if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
			return ERR_PTR(-EOPNOTSUPP);
		ev = ppmu->generic_events[ev];
		break;
	case PERF_TYPE_HW_CACHE:
		err = hw_perf_cache_event(counter->attr.config, &ev);
		if (err)
			return ERR_PTR(err);
		break;
	case PERF_TYPE_RAW:
		ev = counter->attr.config;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}
	counter->hw.config_base = ev;
	counter->hw.idx = 0;

	/*
	 * If we are not running on a hypervisor, force the
	 * exclude_hv bit to 0 so that we don't care what
	 * the user set it to.
	 */
	if (!firmware_has_feature(FW_FEATURE_LPAR))
		counter->attr.exclude_hv = 0;

	/*
	 * If this is a per-task counter, then we can use
	 * PM_RUN_* events interchangeably with their non RUN_*
	 * equivalents, e.g. PM_RUN_CYC instead of PM_CYC.
	 * XXX we should check if the task is an idle task.
	 */
	flags = 0;
	if (counter->ctx->task)
		flags |= PPMU_ONLY_COUNT_RUN;

	/*
	 * If this machine has limited counters, check whether this
	 * event could go on a limited counter.
	 */
	if (ppmu->flags & PPMU_LIMITED_PMC5_6) {
		if (can_go_on_limited_pmc(counter, ev, flags)) {
			flags |= PPMU_LIMITED_PMC_OK;
		} else if (ppmu->limited_pmc_event(ev)) {
			/*
			 * The requested event is on a limited PMC,
			 * but we can't use a limited PMC; see if any
			 * alternative goes on a normal PMC.
			 */
			ev = normal_pmc_alternative(ev, flags);
			if (!ev)
				return ERR_PTR(-EINVAL);
		}
	}

	/*
	 * If this is in a group, check if it can go on with all the
	 * other hardware counters in the group.  We assume the counter
	 * hasn't been linked into its leader's sibling list at this point.
	 */
	n = 0;
	if (counter->group_leader != counter) {
		n = collect_events(counter->group_leader, ppmu->n_counter - 1,
				   ctrs, events, cflags);
		if (n < 0)
			return ERR_PTR(-EINVAL);
	}
	events[n] = ev;
	ctrs[n] = counter;
	cflags[n] = flags;
	if (check_excludes(ctrs, cflags, n, 1))
		return ERR_PTR(-EINVAL);
	if (power_check_constraints(events, cflags, n + 1))
		return ERR_PTR(-EINVAL);

	counter->hw.config = events[n];
	counter->hw.counter_base = cflags[n];
	counter->hw.last_period = counter->hw.sample_period;
	atomic64_set(&counter->hw.period_left, counter->hw.last_period);

	/*
	 * See if we need to reserve the PMU.
	 * If no counters are currently in use, then we have to take a
	 * mutex to ensure that we don't race with another task doing
	 * reserve_pmc_hardware or release_pmc_hardware.
	 */
	err = 0;
	if (!atomic_inc_not_zero(&num_counters)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&num_counters) == 0 &&
		    reserve_pmc_hardware(perf_counter_interrupt))
			err = -EBUSY;
		else
			atomic_inc(&num_counters);
		mutex_unlock(&pmc_reserve_mutex);
	}
	counter->destroy = hw_perf_counter_destroy;

	if (err)
		return ERR_PTR(err);
	return &power_pmu;
}

/*
 * A counter has overflowed; update its count and record
 * things if requested.  Note that interrupts are hard-disabled
 * here so there is no possibility of being interrupted.
 */
static void record_and_restart(struct perf_counter *counter, long val,
			       struct pt_regs *regs, int nmi)
{
	u64 period = counter->hw.sample_period;
	unsigned long mmcra, sdsync;
	s64 prev, delta, left;
	int record = 0;

	/* we don't have to worry about interrupts here */
	prev = atomic64_read(&counter->hw.prev_count);
	delta = (val - prev) & 0xfffffffful;
	atomic64_add(delta, &counter->count);

	/*
	 * See if the total period for this counter has expired,
	 * and update for the next period.
	 */
	val = 0;
	left = atomic64_read(&counter->hw.period_left) - delta;
	if (period) {
		if (left <= 0) {
			left += period;
			if (left <= 0)
				left = period;
			record = 1;
		}
		if (left < 0x80000000L)
			val = 0x80000000L - left;
	}

	/*
	 * Finally record data if requested.
	 */
	if (record) {
		struct perf_sample_data data = {
			.regs	= regs,
			.addr	= 0,
			.period	= counter->hw.last_period,
		};

		if (counter->attr.sample_type & PERF_SAMPLE_ADDR) {
			/*
			 * The user wants a data address recorded.
			 * If we're not doing instruction sampling,
			 * give them the SDAR (sampled data address).
			 * If we are doing instruction sampling, then only
			 * give them the SDAR if it corresponds to the
			 * instruction pointed to by SIAR; this is indicated
			 * by the [POWER6_]MMCRA_SDSYNC bit in MMCRA.
			 */
			mmcra = regs->dsisr;
			sdsync = (ppmu->flags & PPMU_ALT_SIPR) ?
				POWER6_MMCRA_SDSYNC : MMCRA_SDSYNC;
			if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
				data.addr = mfspr(SPRN_SDAR);
		}
		if (perf_counter_overflow(counter, nmi, &data)) {
			/*
			 * Interrupts are coming too fast - throttle them
			 * by setting the counter to 0, so it will be
			 * at least 2^30 cycles until the next interrupt
			 * (assuming each counter counts at most 2 counts
			 * per cycle).
			 */
			val = 0;
			left = ~0ULL >> 1;
		}
	}

	write_pmc(counter->hw.idx, val);
	atomic64_set(&counter->hw.prev_count, val);
	atomic64_set(&counter->hw.period_left, left);
	perf_counter_update_userpage(counter);
}

/*
 * Called from generic code to get the misc flags (i.e. processor mode)
 * for an event.
 */
unsigned long perf_misc_flags(struct pt_regs *regs)
{
	unsigned long mmcra;

	if (TRAP(regs) != 0xf00) {
		/* not a PMU interrupt */
		return user_mode(regs) ? PERF_EVENT_MISC_USER :
			PERF_EVENT_MISC_KERNEL;
	}

	mmcra = regs->dsisr;
	if (ppmu->flags & PPMU_ALT_SIPR) {
		if (mmcra & POWER6_MMCRA_SIHV)
			return PERF_EVENT_MISC_HYPERVISOR;
		return (mmcra & POWER6_MMCRA_SIPR) ? PERF_EVENT_MISC_USER :
			PERF_EVENT_MISC_KERNEL;
	}
	if (mmcra & MMCRA_SIHV)
		return PERF_EVENT_MISC_HYPERVISOR;
	return (mmcra & MMCRA_SIPR) ? PERF_EVENT_MISC_USER :
		PERF_EVENT_MISC_KERNEL;
}

/*
 * Called from generic code to get the instruction pointer
 * for an event.
 */
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
	unsigned long mmcra;
	unsigned long ip;
	unsigned long slot;

	if (TRAP(regs) != 0xf00)
		return regs->nip;	/* not a PMU interrupt */

	ip = mfspr(SPRN_SIAR);
	mmcra = regs->dsisr;
	if ((mmcra & MMCRA_SAMPLE_ENABLE) && !(ppmu->flags & PPMU_ALT_SIPR)) {
		slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT;
		if (slot > 1)
			ip += 4 * (slot - 1);
	}
	return ip;
}

/*
 * Performance monitor interrupt stuff
 */
static void perf_counter_interrupt(struct pt_regs *regs)
{
	int i;
	struct cpu_hw_counters *cpuhw = &__get_cpu_var(cpu_hw_counters);
	struct perf_counter *counter;
	long val;
	int found = 0;
	int nmi;

	if (cpuhw->n_limited)
		freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5),
					mfspr(SPRN_PMC6));

	/*
	 * Overload regs->dsisr to store MMCRA so we only need to read it once.
	 */
	regs->dsisr = mfspr(SPRN_MMCRA);

	/*
	 * If interrupts were soft-disabled when this PMU interrupt
	 * occurred, treat it as an NMI.
	 */
	nmi = !regs->softe;
	if (nmi)
		nmi_enter();
	else
		irq_enter();

	for (i = 0; i < cpuhw->n_counters; ++i) {
		counter = cpuhw->counter[i];
		if (!counter->hw.idx || is_limited_pmc(counter->hw.idx))
			continue;
		val = read_pmc(counter->hw.idx);
		if ((int)val < 0) {
			/* counter has overflowed */
			found = 1;
			record_and_restart(counter, val, regs, nmi);
		}
	}

	/*
	 * In case we didn't find and reset the counter that caused
	 * the interrupt, scan all counters and reset any that are
	 * negative, to avoid getting continual interrupts.
	 * Any that we processed in the previous loop will not be negative.
	 */
	if (!found) {
		for (i = 0; i < ppmu->n_counter; ++i) {
			if (is_limited_pmc(i + 1))
				continue;
			val = read_pmc(i + 1);
			if ((int)val < 0)
				write_pmc(i + 1, 0);
		}
	}

	/*
	 * Reset MMCR0 to its normal value.  This will set PMXE and
	 * clear FC (freeze counters) and PMAO (perf mon alert occurred)
	 * and thus allow interrupts to occur again.
	 * XXX might want to use MSR.PM to keep the counters frozen until
	 * we get back out of this interrupt.
	 */
	write_mmcr0(cpuhw, cpuhw->mmcr[0]);

	if (nmi)
		nmi_exit();
	else
		irq_exit();
}

void hw_perf_counter_setup(int cpu)
{
	struct cpu_hw_counters *cpuhw = &per_cpu(cpu_hw_counters, cpu);

	memset(cpuhw, 0, sizeof(*cpuhw));
	cpuhw->mmcr[0] = MMCR0_FC;
}

int register_power_pmu(struct power_pmu *pmu)
{
	if (ppmu)
		return -EBUSY;		/* something's already registered */

	ppmu = pmu;
	pr_info("%s performance monitor hardware support registered\n",
		pmu->name);

	/*
	 * Use FCHV to ignore kernel events if MSR.HV is set.
	 */
	if (mfmsr() & MSR_HV)
		freeze_counters_kernel = MMCR0_FCHV;

	return 0;
}
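
/*
 * A minimal sketch (names are illustrative, not real drivers) of how a
 * CPU-specific file hooks into the code above: each power*-pmu.c file
 * fills in a struct power_pmu with its event tables and constraint
 * fields and hands it to register_power_pmu() from an initcall, roughly:
 *
 *	static struct power_pmu example_pmu = {
 *		.name			= "EXAMPLE",
 *		.n_counter		= 6,
 *		.compute_mmcr		= example_compute_mmcr,
 *		.get_constraint		= example_get_constraint,
 *		.get_alternatives	= example_get_alternatives,
 *		.disable_pmc		= example_disable_pmc,
 *		.n_generic		= ARRAY_SIZE(example_generic_events),
 *		.generic_events		= example_generic_events,
 *		.cache_events		= &example_cache_events,
 *	};
 *
 *	static int __init init_example_pmu(void)
 *	{
 *		return register_power_pmu(&example_pmu);
 *	}
 *	arch_initcall(init_example_pmu);
 */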