perf_counter.c

/*
 * Performance counter support - powerpc architecture code
 *
 * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_counter.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/reg.h>
#include <asm/pmc.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>

struct cpu_hw_counters {
        int n_counters;
        int n_percpu;
        int disabled;
        int n_added;
        int n_limited;
        u8  pmcs_enabled;
        struct perf_counter *counter[MAX_HWCOUNTERS];
        u64 events[MAX_HWCOUNTERS];
        unsigned int flags[MAX_HWCOUNTERS];
        u64 mmcr[3];
        struct perf_counter *limited_counter[MAX_LIMITED_HWCOUNTERS];
        u8  limited_hwidx[MAX_LIMITED_HWCOUNTERS];
};
DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters);

struct power_pmu *ppmu;

/*
 * Normally, to ignore kernel events we set the FCS (freeze counters
 * in supervisor mode) bit in MMCR0, but if the kernel runs with the
 * hypervisor bit set in the MSR, or if we are running on a processor
 * where the hypervisor bit is forced to 1 (as on Apple G5 processors),
 * then we need to use the FCHV bit to ignore kernel events.
 */
static unsigned int freeze_counters_kernel = MMCR0_FCS;

static void perf_counter_interrupt(struct pt_regs *regs);

void perf_counter_print_debug(void)
{
}

/*
 * Read one performance monitor counter (PMC).
 */
static unsigned long read_pmc(int idx)
{
        unsigned long val;

        switch (idx) {
        case 1:
                val = mfspr(SPRN_PMC1);
                break;
        case 2:
                val = mfspr(SPRN_PMC2);
                break;
        case 3:
                val = mfspr(SPRN_PMC3);
                break;
        case 4:
                val = mfspr(SPRN_PMC4);
                break;
        case 5:
                val = mfspr(SPRN_PMC5);
                break;
        case 6:
                val = mfspr(SPRN_PMC6);
                break;
        case 7:
                val = mfspr(SPRN_PMC7);
                break;
        case 8:
                val = mfspr(SPRN_PMC8);
                break;
        default:
                printk(KERN_ERR "oops trying to read PMC%d\n", idx);
                val = 0;
        }
        return val;
}

/*
 * Write one PMC.
 */
static void write_pmc(int idx, unsigned long val)
{
        switch (idx) {
        case 1:
                mtspr(SPRN_PMC1, val);
                break;
        case 2:
                mtspr(SPRN_PMC2, val);
                break;
        case 3:
                mtspr(SPRN_PMC3, val);
                break;
        case 4:
                mtspr(SPRN_PMC4, val);
                break;
        case 5:
                mtspr(SPRN_PMC5, val);
                break;
        case 6:
                mtspr(SPRN_PMC6, val);
                break;
        case 7:
                mtspr(SPRN_PMC7, val);
                break;
        case 8:
                mtspr(SPRN_PMC8, val);
                break;
        default:
                printk(KERN_ERR "oops trying to write PMC%d\n", idx);
        }
}

/*
 * Check if a set of events can all go on the PMU at once.
 * If they can't, this will look at alternative codes for the events
 * and see if any combination of alternative codes is feasible.
 * The feasible set is returned in event[].
 */
static int power_check_constraints(u64 event[], unsigned int cflags[],
                                   int n_ev)
{
        u64 mask, value, nv;
        u64 alternatives[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
        u64 amasks[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
        u64 avalues[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
        u64 smasks[MAX_HWCOUNTERS], svalues[MAX_HWCOUNTERS];
        int n_alt[MAX_HWCOUNTERS], choice[MAX_HWCOUNTERS];
        int i, j;
        u64 addf = ppmu->add_fields;
        u64 tadd = ppmu->test_adder;

        if (n_ev > ppmu->n_counter)
                return -1;

        /* First see if the events will go on as-is */
        for (i = 0; i < n_ev; ++i) {
                if ((cflags[i] & PPMU_LIMITED_PMC_REQD)
                    && !ppmu->limited_pmc_event(event[i])) {
                        ppmu->get_alternatives(event[i], cflags[i],
                                               alternatives[i]);
                        event[i] = alternatives[i][0];
                }
                if (ppmu->get_constraint(event[i], &amasks[i][0],
                                         &avalues[i][0]))
                        return -1;
        }
        value = mask = 0;
        for (i = 0; i < n_ev; ++i) {
                nv = (value | avalues[i][0]) + (value & avalues[i][0] & addf);
                if ((((nv + tadd) ^ value) & mask) != 0 ||
                    (((nv + tadd) ^ avalues[i][0]) & amasks[i][0]) != 0)
                        break;
                value = nv;
                mask |= amasks[i][0];
        }
        if (i == n_ev)
                return 0;       /* all OK */

        /* doesn't work, gather alternatives... */
        if (!ppmu->get_alternatives)
                return -1;
        for (i = 0; i < n_ev; ++i) {
                choice[i] = 0;
                n_alt[i] = ppmu->get_alternatives(event[i], cflags[i],
                                                  alternatives[i]);
                for (j = 1; j < n_alt[i]; ++j)
                        ppmu->get_constraint(alternatives[i][j],
                                             &amasks[i][j], &avalues[i][j]);
        }

        /* enumerate all possibilities and see if any will work */
        i = 0;
        j = -1;
        value = mask = nv = 0;
        while (i < n_ev) {
                if (j >= 0) {
                        /* we're backtracking, restore context */
                        value = svalues[i];
                        mask = smasks[i];
                        j = choice[i];
                }
                /*
                 * See if any alternative k for event i,
                 * where k > j, will satisfy the constraints.
                 */
                while (++j < n_alt[i]) {
                        nv = (value | avalues[i][j]) +
                                (value & avalues[i][j] & addf);
                        if ((((nv + tadd) ^ value) & mask) == 0 &&
                            (((nv + tadd) ^ avalues[i][j])
                             & amasks[i][j]) == 0)
                                break;
                }
                if (j >= n_alt[i]) {
                        /*
                         * No feasible alternative, backtrack
                         * to event i-1 and continue enumerating its
                         * alternatives from where we got up to.
                         */
                        if (--i < 0)
                                return -1;
                } else {
                        /*
                         * Found a feasible alternative for event i,
                         * remember where we got up to with this event,
                         * go on to the next event, and start with
                         * the first alternative for it.
                         */
                        choice[i] = j;
                        svalues[i] = value;
                        smasks[i] = mask;
                        value = nv;
                        mask |= amasks[i][j];
                        ++i;
                        j = -1;
                }
        }

        /* OK, we have a feasible combination, tell the caller the solution */
        for (i = 0; i < n_ev; ++i)
                event[i] = alternatives[i][choice[i]];
        return 0;
}
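
/*
 * Illustrative sketch of the constraint encoding used above.  The
 * field layout here is hypothetical; real layouts come from each
 * CPU's get_constraint/add_fields/test_adder.  Suppose two events
 * both program a 2-bit unit-select field at bit N, with addf and
 * tadd zero in that field.  Each constraint is then
 *
 *      mask  = 3ull << N;
 *      value = sel << N;
 *
 * Accumulating event 2 (sel2 = 2) on top of event 1 (sel1 = 1):
 *
 *      nv = (value | v2) + (value & v2 & addf);        // = 3 << N
 *      ((nv + tadd) ^ v2) & m2;                        // = 1 << N, != 0
 *
 * The non-zero result means no single select value satisfies both
 * events, so the solver backtracks and tries alternative event codes.
 * Counting fields work similarly, except addf makes the additions
 * carry and tadd biases the sum so that exceeding a resource's
 * capacity flips a bit covered by the mask.
 */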

/*
 * Check if newly-added counters have consistent settings for
 * exclude_{user,kernel,hv} with each other and any previously
 * added counters.
 */
static int check_excludes(struct perf_counter **ctrs, unsigned int cflags[],
                          int n_prev, int n_new)
{
        int eu = 0, ek = 0, eh = 0;
        int i, n, first;
        struct perf_counter *counter;

        n = n_prev + n_new;
        if (n <= 1)
                return 0;

        first = 1;
        for (i = 0; i < n; ++i) {
                if (cflags[i] & PPMU_LIMITED_PMC_OK) {
                        cflags[i] &= ~PPMU_LIMITED_PMC_REQD;
                        continue;
                }
                counter = ctrs[i];
                if (first) {
                        eu = counter->attr.exclude_user;
                        ek = counter->attr.exclude_kernel;
                        eh = counter->attr.exclude_hv;
                        first = 0;
                } else if (counter->attr.exclude_user != eu ||
                           counter->attr.exclude_kernel != ek ||
                           counter->attr.exclude_hv != eh) {
                        return -EAGAIN;
                }
        }

        if (eu || ek || eh)
                for (i = 0; i < n; ++i)
                        if (cflags[i] & PPMU_LIMITED_PMC_OK)
                                cflags[i] |= PPMU_LIMITED_PMC_REQD;

        return 0;
}

static void power_pmu_read(struct perf_counter *counter)
{
        long val, delta, prev;

        if (!counter->hw.idx)
                return;
        /*
         * Performance monitor interrupts come even when interrupts
         * are soft-disabled, as long as interrupts are hard-enabled.
         * Therefore we treat them like NMIs.
         */
        do {
                prev = atomic64_read(&counter->hw.prev_count);
                barrier();
                val = read_pmc(counter->hw.idx);
        } while (atomic64_cmpxchg(&counter->hw.prev_count, prev, val) != prev);

        /* The counters are only 32 bits wide */
        delta = (val - prev) & 0xfffffffful;
        atomic64_add(delta, &counter->count);
        atomic64_sub(delta, &counter->hw.period_left);
}
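
/*
 * Worked example for the delta computation above (explanatory only):
 * the PMCs are 32 bits, so deltas are taken modulo 2^32.  With
 * prev = 0xfffffff0 and val = 0x00000010 after a wrap,
 *
 *      delta = (0x00000010 - 0xfffffff0) & 0xfffffffful;       // = 0x20
 *
 * i.e. 32 events, even though val < prev numerically.  The cmpxchg
 * loop retries if an interrupt updated prev_count between reading
 * it and reading the PMC.
 */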

/*
 * On some machines, PMC5 and PMC6 can't be written, don't respect
 * the freeze conditions, and don't generate interrupts.  This tells
 * us if `counter' is using such a PMC.
 */
static int is_limited_pmc(int pmcnum)
{
        return (ppmu->flags & PPMU_LIMITED_PMC5_6)
                && (pmcnum == 5 || pmcnum == 6);
}

static void freeze_limited_counters(struct cpu_hw_counters *cpuhw,
                                    unsigned long pmc5, unsigned long pmc6)
{
        struct perf_counter *counter;
        u64 val, prev, delta;
        int i;

        for (i = 0; i < cpuhw->n_limited; ++i) {
                counter = cpuhw->limited_counter[i];
                if (!counter->hw.idx)
                        continue;
                val = (counter->hw.idx == 5) ? pmc5 : pmc6;
                prev = atomic64_read(&counter->hw.prev_count);
                counter->hw.idx = 0;
                delta = (val - prev) & 0xfffffffful;
                atomic64_add(delta, &counter->count);
        }
}

static void thaw_limited_counters(struct cpu_hw_counters *cpuhw,
                                  unsigned long pmc5, unsigned long pmc6)
{
        struct perf_counter *counter;
        u64 val;
        int i;

        for (i = 0; i < cpuhw->n_limited; ++i) {
                counter = cpuhw->limited_counter[i];
                counter->hw.idx = cpuhw->limited_hwidx[i];
                val = (counter->hw.idx == 5) ? pmc5 : pmc6;
                atomic64_set(&counter->hw.prev_count, val);
                perf_counter_update_userpage(counter);
        }
}

/*
 * Since limited counters don't respect the freeze conditions, we
 * have to read them immediately after freezing or unfreezing the
 * other counters.  We try to keep the values from the limited
 * counters as consistent as possible by keeping the delay (in
 * cycles and instructions) between freezing/unfreezing and reading
 * the limited counters as small and consistent as possible.
 * Therefore, if any limited counters are in use, we read them
 * both, and always in the same order, to minimize variability,
 * and do it inside the same asm that writes MMCR0.
 */
static void write_mmcr0(struct cpu_hw_counters *cpuhw, unsigned long mmcr0)
{
        unsigned long pmc5, pmc6;

        if (!cpuhw->n_limited) {
                mtspr(SPRN_MMCR0, mmcr0);
                return;
        }

        /*
         * Write MMCR0, then read PMC5 and PMC6 immediately.
         * To ensure we don't get a performance monitor interrupt
         * between writing MMCR0 and freezing/thawing the limited
         * counters, we first write MMCR0 with the counter overflow
         * interrupt enable bits turned off.
         */
        asm volatile("mtspr %3,%2; mfspr %0,%4; mfspr %1,%5"
                     : "=&r" (pmc5), "=&r" (pmc6)
                     : "r" (mmcr0 & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)),
                       "i" (SPRN_MMCR0),
                       "i" (SPRN_PMC5), "i" (SPRN_PMC6));

        if (mmcr0 & MMCR0_FC)
                freeze_limited_counters(cpuhw, pmc5, pmc6);
        else
                thaw_limited_counters(cpuhw, pmc5, pmc6);

        /*
         * Write the full MMCR0 including the counter overflow interrupt
         * enable bits, if necessary.
         */
        if (mmcr0 & (MMCR0_PMC1CE | MMCR0_PMCjCE))
                mtspr(SPRN_MMCR0, mmcr0);
}
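
/*
 * Usage note (explanatory): all three call sites in this file go
 * through write_mmcr0() so that the limited counters are sampled
 * right next to the MMCR0 write.  hw_perf_disable() passes mmcr0
 * with MMCR0_FC set, taking the freeze_limited_counters() path;
 * hw_perf_enable() and perf_counter_interrupt() pass cpuhw->mmcr[0]
 * with MMCR0_FC clear, taking the thaw_limited_counters() path.
 */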

/*
 * Disable all counters to prevent PMU interrupts and to allow
 * counters to be added or removed.
 */
void hw_perf_disable(void)
{
        struct cpu_hw_counters *cpuhw;
        unsigned long ret;
        unsigned long flags;

        local_irq_save(flags);
        cpuhw = &__get_cpu_var(cpu_hw_counters);

        ret = cpuhw->disabled;
        if (!ret) {
                cpuhw->disabled = 1;
                cpuhw->n_added = 0;

                /*
                 * Check if we ever enabled the PMU on this cpu.
                 */
                if (!cpuhw->pmcs_enabled) {
                        if (ppc_md.enable_pmcs)
                                ppc_md.enable_pmcs();
                        cpuhw->pmcs_enabled = 1;
                }

                /*
                 * Disable instruction sampling if it was enabled
                 */
                if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
                        mtspr(SPRN_MMCRA,
                              cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
                        mb();
                }

                /*
                 * Set the 'freeze counters' bit.
                 * The barrier is to make sure the mtspr has been
                 * executed and the PMU has frozen the counters
                 * before we return.
                 */
                write_mmcr0(cpuhw, mfspr(SPRN_MMCR0) | MMCR0_FC);
                mb();
        }
        local_irq_restore(flags);
}

/*
 * Re-enable all counters if disable == 0.
 * If we were previously disabled and counters were added, then
 * put the new config on the PMU.
 */
void hw_perf_enable(void)
{
        struct perf_counter *counter;
        struct cpu_hw_counters *cpuhw;
        unsigned long flags;
        long i;
        unsigned long val;
        s64 left;
        unsigned int hwc_index[MAX_HWCOUNTERS];
        int n_lim;
        int idx;

        local_irq_save(flags);
        cpuhw = &__get_cpu_var(cpu_hw_counters);
        if (!cpuhw->disabled) {
                local_irq_restore(flags);
                return;
        }
        cpuhw->disabled = 0;

        /*
         * If we didn't change anything, or only removed counters,
         * no need to recalculate MMCR* settings and reset the PMCs.
         * Just reenable the PMU with the current MMCR* settings
         * (possibly updated for removal of counters).
         */
        if (!cpuhw->n_added) {
                mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
                mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
                if (cpuhw->n_counters == 0)
                        get_lppaca()->pmcregs_in_use = 0;
                goto out_enable;
        }

        /*
         * Compute MMCR* values for the new set of counters
         */
        if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_counters, hwc_index,
                               cpuhw->mmcr)) {
                /* shouldn't ever get here */
                printk(KERN_ERR "oops compute_mmcr failed\n");
                goto out;
        }

        /*
         * Add in MMCR0 freeze bits corresponding to the
         * attr.exclude_* bits for the first counter.
         * We have already checked that all counters have the
         * same values for these bits as the first counter.
         */
        counter = cpuhw->counter[0];
        if (counter->attr.exclude_user)
                cpuhw->mmcr[0] |= MMCR0_FCP;
        if (counter->attr.exclude_kernel)
                cpuhw->mmcr[0] |= freeze_counters_kernel;
        if (counter->attr.exclude_hv)
                cpuhw->mmcr[0] |= MMCR0_FCHV;

        /*
         * Write the new configuration to MMCR* with the freeze
         * bit set and set the hardware counters to their initial values.
         * Then unfreeze the counters.
         */
        get_lppaca()->pmcregs_in_use = 1;
        mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
        mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
        mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
              | MMCR0_FC);

        /*
         * Read off any pre-existing counters that need to move
         * to another PMC.
         */
        for (i = 0; i < cpuhw->n_counters; ++i) {
                counter = cpuhw->counter[i];
                if (counter->hw.idx && counter->hw.idx != hwc_index[i] + 1) {
                        power_pmu_read(counter);
                        write_pmc(counter->hw.idx, 0);
                        counter->hw.idx = 0;
                }
        }

        /*
         * Initialize the PMCs for all the new and moved counters.
         */
        cpuhw->n_limited = n_lim = 0;
        for (i = 0; i < cpuhw->n_counters; ++i) {
                counter = cpuhw->counter[i];
                if (counter->hw.idx)
                        continue;
                idx = hwc_index[i] + 1;
                if (is_limited_pmc(idx)) {
                        cpuhw->limited_counter[n_lim] = counter;
                        cpuhw->limited_hwidx[n_lim] = idx;
                        ++n_lim;
                        continue;
                }
                val = 0;
                if (counter->hw.sample_period) {
                        left = atomic64_read(&counter->hw.period_left);
                        if (left < 0x80000000L)
                                val = 0x80000000L - left;
                }
                atomic64_set(&counter->hw.prev_count, val);
                counter->hw.idx = idx;
                write_pmc(idx, val);
                perf_counter_update_userpage(counter);
        }
        cpuhw->n_limited = n_lim;
        cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE;

 out_enable:
        mb();
        write_mmcr0(cpuhw, cpuhw->mmcr[0]);

        /*
         * Enable instruction sampling if necessary
         */
        if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
                mb();
                mtspr(SPRN_MMCRA, cpuhw->mmcr[2]);
        }

 out:
        local_irq_restore(flags);
}
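
/*
 * Worked example for the initial PMC value above (illustrative
 * numbers): a PMC raises the performance monitor exception when it
 * becomes negative, i.e. when bit 31 is set, so to get an interrupt
 * after `left' more events the counter is started at
 * 0x80000000 - left.  For a sample period of 100000:
 *
 *      val = 0x80000000 - 100000;      // = 0x7ffe795f
 *
 * If left >= 0x80000000 the counter starts at 0 and counts 2^31
 * events before the first interrupt; record_and_restart() then
 * re-arms it for the remainder.
 */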

static int collect_events(struct perf_counter *group, int max_count,
                          struct perf_counter *ctrs[], u64 *events,
                          unsigned int *flags)
{
        int n = 0;
        struct perf_counter *counter;

        if (!is_software_counter(group)) {
                if (n >= max_count)
                        return -1;
                ctrs[n] = group;
                flags[n] = group->hw.counter_base;
                events[n++] = group->hw.config;
        }
        list_for_each_entry(counter, &group->sibling_list, list_entry) {
                if (!is_software_counter(counter) &&
                    counter->state != PERF_COUNTER_STATE_OFF) {
                        if (n >= max_count)
                                return -1;
                        ctrs[n] = counter;
                        flags[n] = counter->hw.counter_base;
                        events[n++] = counter->hw.config;
                }
        }
        return n;
}

static void counter_sched_in(struct perf_counter *counter, int cpu)
{
        counter->state = PERF_COUNTER_STATE_ACTIVE;
        counter->oncpu = cpu;
        counter->tstamp_running += counter->ctx->time - counter->tstamp_stopped;
        if (is_software_counter(counter))
                counter->pmu->enable(counter);
}

/*
 * Called to enable a whole group of counters.
 * Returns 1 if the group was enabled, or -EAGAIN if it could not be.
 * Assumes the caller has disabled interrupts and has
 * frozen the PMU with hw_perf_save_disable.
 */
int hw_perf_group_sched_in(struct perf_counter *group_leader,
                           struct perf_cpu_context *cpuctx,
                           struct perf_counter_context *ctx, int cpu)
{
        struct cpu_hw_counters *cpuhw;
        long i, n, n0;
        struct perf_counter *sub;

        cpuhw = &__get_cpu_var(cpu_hw_counters);
        n0 = cpuhw->n_counters;
        n = collect_events(group_leader, ppmu->n_counter - n0,
                           &cpuhw->counter[n0], &cpuhw->events[n0],
                           &cpuhw->flags[n0]);
        if (n < 0)
                return -EAGAIN;
        if (check_excludes(cpuhw->counter, cpuhw->flags, n0, n))
                return -EAGAIN;
        i = power_check_constraints(cpuhw->events, cpuhw->flags, n + n0);
        if (i < 0)
                return -EAGAIN;
        cpuhw->n_counters = n0 + n;
        cpuhw->n_added += n;

        /*
         * OK, this group can go on; update counter states etc.,
         * and enable any software counters
         */
        for (i = n0; i < n0 + n; ++i)
                cpuhw->counter[i]->hw.config = cpuhw->events[i];
        cpuctx->active_oncpu += n;
        n = 1;
        counter_sched_in(group_leader, cpu);
        list_for_each_entry(sub, &group_leader->sibling_list, list_entry) {
                if (sub->state != PERF_COUNTER_STATE_OFF) {
                        counter_sched_in(sub, cpu);
                        ++n;
                }
        }
        ctx->nr_active += n;

        return 1;
}

/*
 * Add a counter to the PMU.
 * If all counters are not already frozen, then we disable and
 * re-enable the PMU in order to get hw_perf_enable to do the
 * actual work of reconfiguring the PMU.
 */
static int power_pmu_enable(struct perf_counter *counter)
{
        struct cpu_hw_counters *cpuhw;
        unsigned long flags;
        int n0;
        int ret = -EAGAIN;

        local_irq_save(flags);
        perf_disable();

        /*
         * Add the counter to the list (if there is room)
         * and check whether the total set is still feasible.
         */
        cpuhw = &__get_cpu_var(cpu_hw_counters);
        n0 = cpuhw->n_counters;
        if (n0 >= ppmu->n_counter)
                goto out;
        cpuhw->counter[n0] = counter;
        cpuhw->events[n0] = counter->hw.config;
        cpuhw->flags[n0] = counter->hw.counter_base;
        if (check_excludes(cpuhw->counter, cpuhw->flags, n0, 1))
                goto out;
        if (power_check_constraints(cpuhw->events, cpuhw->flags, n0 + 1))
                goto out;

        counter->hw.config = cpuhw->events[n0];
        ++cpuhw->n_counters;
        ++cpuhw->n_added;

        ret = 0;
 out:
        perf_enable();
        local_irq_restore(flags);
        return ret;
}

/*
 * Remove a counter from the PMU.
 */
static void power_pmu_disable(struct perf_counter *counter)
{
        struct cpu_hw_counters *cpuhw;
        long i;
        unsigned long flags;

        local_irq_save(flags);
        perf_disable();

        power_pmu_read(counter);

        cpuhw = &__get_cpu_var(cpu_hw_counters);
        for (i = 0; i < cpuhw->n_counters; ++i) {
                if (counter == cpuhw->counter[i]) {
                        while (++i < cpuhw->n_counters)
                                cpuhw->counter[i-1] = cpuhw->counter[i];
                        --cpuhw->n_counters;
                        ppmu->disable_pmc(counter->hw.idx - 1, cpuhw->mmcr);
                        if (counter->hw.idx) {
                                write_pmc(counter->hw.idx, 0);
                                counter->hw.idx = 0;
                        }
                        perf_counter_update_userpage(counter);
                        break;
                }
        }
        for (i = 0; i < cpuhw->n_limited; ++i)
                if (counter == cpuhw->limited_counter[i])
                        break;
        if (i < cpuhw->n_limited) {
                while (++i < cpuhw->n_limited) {
                        cpuhw->limited_counter[i-1] = cpuhw->limited_counter[i];
                        cpuhw->limited_hwidx[i-1] = cpuhw->limited_hwidx[i];
                }
                --cpuhw->n_limited;
        }
        if (cpuhw->n_counters == 0) {
                /* disable exceptions if no counters are running */
                cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
        }

        perf_enable();
        local_irq_restore(flags);
}

/*
 * Re-enable interrupts on a counter after they were throttled
 * because they were coming too fast.
 */
static void power_pmu_unthrottle(struct perf_counter *counter)
{
        s64 val, left;
        unsigned long flags;

        if (!counter->hw.idx || !counter->hw.sample_period)
                return;
        local_irq_save(flags);
        perf_disable();
        power_pmu_read(counter);
        left = counter->hw.sample_period;
        val = 0;
        if (left < 0x80000000L)
                val = 0x80000000L - left;
        write_pmc(counter->hw.idx, val);
        atomic64_set(&counter->hw.prev_count, val);
        atomic64_set(&counter->hw.period_left, left);
        perf_counter_update_userpage(counter);
        perf_enable();
        local_irq_restore(flags);
}

struct pmu power_pmu = {
        .enable     = power_pmu_enable,
        .disable    = power_pmu_disable,
        .read       = power_pmu_read,
        .unthrottle = power_pmu_unthrottle,
};

/*
 * Return 1 if we might be able to put counter on a limited PMC,
 * or 0 if not.
 * A counter can only go on a limited PMC if it counts something
 * that a limited PMC can count, doesn't require interrupts, and
 * doesn't exclude any processor mode.
 */
static int can_go_on_limited_pmc(struct perf_counter *counter, u64 ev,
                                 unsigned int flags)
{
        int n;
        u64 alt[MAX_EVENT_ALTERNATIVES];

        if (counter->attr.exclude_user
            || counter->attr.exclude_kernel
            || counter->attr.exclude_hv
            || counter->attr.sample_period)
                return 0;

        if (ppmu->limited_pmc_event(ev))
                return 1;

        /*
         * The requested event isn't on a limited PMC already;
         * see if any alternative code goes on a limited PMC.
         */
        if (!ppmu->get_alternatives)
                return 0;

        flags |= PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD;
        n = ppmu->get_alternatives(ev, flags, alt);

        return n > 0;
}

/*
 * Find an alternative event that goes on a normal PMC, if possible,
 * and return the event code, or 0 if there is no such alternative.
 * (Note: event code 0 is "don't count" on all machines.)
 */
static u64 normal_pmc_alternative(u64 ev, unsigned long flags)
{
        u64 alt[MAX_EVENT_ALTERNATIVES];
        int n;

        flags &= ~(PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD);
        n = ppmu->get_alternatives(ev, flags, alt);
        if (!n)
                return 0;
        return alt[0];
}

/* Number of perf_counters counting hardware events */
static atomic_t num_counters;
/* Used to avoid races in calling reserve/release_pmc_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);

/*
 * Release the PMU if this is the last perf_counter.
 */
static void hw_perf_counter_destroy(struct perf_counter *counter)
{
        if (!atomic_add_unless(&num_counters, -1, 1)) {
                mutex_lock(&pmc_reserve_mutex);
                if (atomic_dec_return(&num_counters) == 0)
                        release_pmc_hardware();
                mutex_unlock(&pmc_reserve_mutex);
        }
}
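
/*
 * Note on the refcounting idiom above (explanatory):
 * atomic_add_unless(&num_counters, -1, 1) decrements and returns
 * non-zero unless the count is 1, so the common "not the last
 * counter" case avoids the mutex entirely.  A zero return means we
 * may be the last user: take pmc_reserve_mutex, decrement, and only
 * release the hardware if the count really reached zero.  This
 * pairs with the reservation path in hw_perf_counter_init() below.
 */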

const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
{
        u64 ev;
        unsigned long flags;
        struct perf_counter *ctrs[MAX_HWCOUNTERS];
        u64 events[MAX_HWCOUNTERS];
        unsigned int cflags[MAX_HWCOUNTERS];
        int n;
        int err;

        if (!ppmu)
                return ERR_PTR(-ENXIO);
        if (counter->attr.type != PERF_TYPE_RAW) {
                ev = counter->attr.config;
                if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
                        return ERR_PTR(-EOPNOTSUPP);
                ev = ppmu->generic_events[ev];
        } else {
                ev = counter->attr.config;
        }
        counter->hw.config_base = ev;
        counter->hw.idx = 0;

        /*
         * If we are not running on a hypervisor, force the
         * exclude_hv bit to 0 so that we don't care what
         * the user set it to.
         */
        if (!firmware_has_feature(FW_FEATURE_LPAR))
                counter->attr.exclude_hv = 0;

        /*
         * If this is a per-task counter, then we can use
         * PM_RUN_* events interchangeably with their non RUN_*
         * equivalents, e.g. PM_RUN_CYC instead of PM_CYC.
         * XXX we should check if the task is an idle task.
         */
        flags = 0;
        if (counter->ctx->task)
                flags |= PPMU_ONLY_COUNT_RUN;

        /*
         * If this machine has limited counters, check whether this
         * event could go on a limited counter.
         */
        if (ppmu->flags & PPMU_LIMITED_PMC5_6) {
                if (can_go_on_limited_pmc(counter, ev, flags)) {
                        flags |= PPMU_LIMITED_PMC_OK;
                } else if (ppmu->limited_pmc_event(ev)) {
                        /*
                         * The requested event is on a limited PMC,
                         * but we can't use a limited PMC; see if any
                         * alternative goes on a normal PMC.
                         */
                        ev = normal_pmc_alternative(ev, flags);
                        if (!ev)
                                return ERR_PTR(-EINVAL);
                }
        }

        /*
         * If this is in a group, check if it can go on with all the
         * other hardware counters in the group.  We assume the counter
         * hasn't been linked into its leader's sibling list at this point.
         */
        n = 0;
        if (counter->group_leader != counter) {
                n = collect_events(counter->group_leader, ppmu->n_counter - 1,
                                   ctrs, events, cflags);
                if (n < 0)
                        return ERR_PTR(-EINVAL);
        }
        events[n] = ev;
        ctrs[n] = counter;
        cflags[n] = flags;
        if (check_excludes(ctrs, cflags, n, 1))
                return ERR_PTR(-EINVAL);
        if (power_check_constraints(events, cflags, n + 1))
                return ERR_PTR(-EINVAL);

        counter->hw.config = events[n];
        counter->hw.counter_base = cflags[n];
        atomic64_set(&counter->hw.period_left, counter->hw.sample_period);

        /*
         * See if we need to reserve the PMU.
         * If no counters are currently in use, then we have to take a
         * mutex to ensure that we don't race with another task doing
         * reserve_pmc_hardware or release_pmc_hardware.
         */
        err = 0;
        if (!atomic_inc_not_zero(&num_counters)) {
                mutex_lock(&pmc_reserve_mutex);
                if (atomic_read(&num_counters) == 0 &&
                    reserve_pmc_hardware(perf_counter_interrupt))
                        err = -EBUSY;
                else
                        atomic_inc(&num_counters);
                mutex_unlock(&pmc_reserve_mutex);
        }
        counter->destroy = hw_perf_counter_destroy;

        if (err)
                return ERR_PTR(err);
        return &power_pmu;
}

/*
 * A counter has overflowed; update its count and record
 * things if requested.  Note that interrupts are hard-disabled
 * here so there is no possibility of being interrupted.
 */
static void record_and_restart(struct perf_counter *counter, long val,
                               struct pt_regs *regs, int nmi)
{
        u64 period = counter->hw.sample_period;
        s64 prev, delta, left;
        int record = 0;
        u64 addr, mmcra, sdsync;

        /* we don't have to worry about interrupts here */
        prev = atomic64_read(&counter->hw.prev_count);
        delta = (val - prev) & 0xfffffffful;
        atomic64_add(delta, &counter->count);

        /*
         * See if the total period for this counter has expired,
         * and update for the next period.
         */
        val = 0;
        left = atomic64_read(&counter->hw.period_left) - delta;
        if (period) {
                if (left <= 0) {
                        left += period;
                        if (left <= 0)
                                left = period;
                        record = 1;
                }
                if (left < 0x80000000L)
                        val = 0x80000000L - left;
        }

        /*
         * Finally record data if requested.
         */
        if (record) {
                addr = 0;
                if (counter->attr.sample_type & PERF_SAMPLE_ADDR) {
                        /*
                         * The user wants a data address recorded.
                         * If we're not doing instruction sampling,
                         * give them the SDAR (sampled data address).
                         * If we are doing instruction sampling, then only
                         * give them the SDAR if it corresponds to the
                         * instruction pointed to by SIAR; this is indicated
                         * by the [POWER6_]MMCRA_SDSYNC bit in MMCRA.
                         */
                        mmcra = regs->dsisr;
                        sdsync = (ppmu->flags & PPMU_ALT_SIPR) ?
                                POWER6_MMCRA_SDSYNC : MMCRA_SDSYNC;
                        if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
                                addr = mfspr(SPRN_SDAR);
                }
                if (perf_counter_overflow(counter, nmi, regs, addr)) {
                        /*
                         * Interrupts are coming too fast - throttle them
                         * by setting the counter to 0, so it will be
                         * at least 2^30 cycles until the next interrupt
                         * (assuming each counter counts at most 2 counts
                         * per cycle).
                         */
                        val = 0;
                        left = ~0ULL >> 1;
                }
        }

        write_pmc(counter->hw.idx, val);
        atomic64_set(&counter->hw.prev_count, val);
        atomic64_set(&counter->hw.period_left, left);
        perf_counter_update_userpage(counter);
}
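
/*
 * Worked example of the period update above (illustrative numbers):
 * with period = 1000 and period_left = 1000, an overshoot of
 * delta = 2500 (e.g. due to interrupt latency) gives
 *
 *      left = 1000 - 2500;     // -1500
 *      left += 1000;           // -500, still <= 0
 *      left = 1000;            // so re-arm for one full period
 *
 * i.e. missed periods are skipped rather than replayed, and one
 * sample is recorded for the overflow.
 */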

/*
 * Called from generic code to get the misc flags (i.e. processor mode)
 * for an event.
 */
unsigned long perf_misc_flags(struct pt_regs *regs)
{
        unsigned long mmcra;

        if (TRAP(regs) != 0xf00) {
                /* not a PMU interrupt */
                return user_mode(regs) ? PERF_EVENT_MISC_USER :
                        PERF_EVENT_MISC_KERNEL;
        }
        mmcra = regs->dsisr;
        if (ppmu->flags & PPMU_ALT_SIPR) {
                if (mmcra & POWER6_MMCRA_SIHV)
                        return PERF_EVENT_MISC_HYPERVISOR;
                return (mmcra & POWER6_MMCRA_SIPR) ? PERF_EVENT_MISC_USER :
                        PERF_EVENT_MISC_KERNEL;
        }
        if (mmcra & MMCRA_SIHV)
                return PERF_EVENT_MISC_HYPERVISOR;
        return (mmcra & MMCRA_SIPR) ? PERF_EVENT_MISC_USER :
                PERF_EVENT_MISC_KERNEL;
}

/*
 * Called from generic code to get the instruction pointer
 * for an event.
 */
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
        unsigned long mmcra;
        unsigned long ip;
        unsigned long slot;

        if (TRAP(regs) != 0xf00)
                return regs->nip;       /* not a PMU interrupt */

        ip = mfspr(SPRN_SIAR);
        mmcra = regs->dsisr;
        if ((mmcra & MMCRA_SAMPLE_ENABLE) && !(ppmu->flags & PPMU_ALT_SIPR)) {
                slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT;
                if (slot > 1)
                        ip += 4 * (slot - 1);
        }
        return ip;
}
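
/*
 * Example of the slot adjustment above (illustrative): with
 * instruction sampling enabled, SIAR points at the first instruction
 * of the sampled dispatch group, and the MMCRA_SLOT field gives the
 * sampled instruction's slot within that group.  Instructions are
 * 4 bytes, so for slot 3 the reported ip is SIAR + 4 * (3 - 1),
 * i.e. 8 bytes past SIAR.
 */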

/*
 * Performance monitor interrupt stuff
 */
static void perf_counter_interrupt(struct pt_regs *regs)
{
        int i;
        struct cpu_hw_counters *cpuhw = &__get_cpu_var(cpu_hw_counters);
        struct perf_counter *counter;
        long val;
        int found = 0;
        int nmi;

        if (cpuhw->n_limited)
                freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5),
                                        mfspr(SPRN_PMC6));

        /*
         * Overload regs->dsisr to store MMCRA so we only need to read it once.
         */
        regs->dsisr = mfspr(SPRN_MMCRA);

        /*
         * If interrupts were soft-disabled when this PMU interrupt
         * occurred, treat it as an NMI.
         */
        nmi = !regs->softe;
        if (nmi)
                nmi_enter();
        else
                irq_enter();

        for (i = 0; i < cpuhw->n_counters; ++i) {
                counter = cpuhw->counter[i];
                if (!counter->hw.idx || is_limited_pmc(counter->hw.idx))
                        continue;
                val = read_pmc(counter->hw.idx);
                if ((int)val < 0) {
                        /* counter has overflowed */
                        found = 1;
                        record_and_restart(counter, val, regs, nmi);
                }
        }

        /*
         * In case we didn't find and reset the counter that caused
         * the interrupt, scan all counters and reset any that are
         * negative, to avoid getting continual interrupts.
         * Any that we processed in the previous loop will not be negative.
         */
        if (!found) {
                for (i = 0; i < ppmu->n_counter; ++i) {
                        if (is_limited_pmc(i + 1))
                                continue;
                        val = read_pmc(i + 1);
                        if ((int)val < 0)
                                write_pmc(i + 1, 0);
                }
        }

        /*
         * Reset MMCR0 to its normal value.  This will set PMXE and
         * clear FC (freeze counters) and PMAO (perf mon alert occurred)
         * and thus allow interrupts to occur again.
         * XXX might want to use MSR.PM to keep the counters frozen until
         * we get back out of this interrupt.
         */
        write_mmcr0(cpuhw, cpuhw->mmcr[0]);

        if (nmi)
                nmi_exit();
        else
                irq_exit();
}
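
/*
 * Note on overflow detection above (explanatory): since the PMCs are
 * 32 bits, a counter that has passed 0x80000000 reads back with bit
 * 31 set and is negative when cast to int.  A counter armed at
 * 0x7ffe795f, say, reads a little above 0x80000000 just after its
 * period expires, so (int)val < 0 picks it out; counters that have
 * not overflowed still read below 0x80000000 and are skipped.
 */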

void hw_perf_counter_setup(int cpu)
{
        struct cpu_hw_counters *cpuhw = &per_cpu(cpu_hw_counters, cpu);

        memset(cpuhw, 0, sizeof(*cpuhw));
        cpuhw->mmcr[0] = MMCR0_FC;
}

extern struct power_pmu power4_pmu;
extern struct power_pmu ppc970_pmu;
extern struct power_pmu power5_pmu;
extern struct power_pmu power5p_pmu;
extern struct power_pmu power6_pmu;

static int init_perf_counters(void)
{
        unsigned long pvr;

        /* XXX should get this from cputable */
        pvr = mfspr(SPRN_PVR);
        switch (PVR_VER(pvr)) {
        case PV_POWER4:
        case PV_POWER4p:
                ppmu = &power4_pmu;
                break;
        case PV_970:
        case PV_970FX:
        case PV_970MP:
                ppmu = &ppc970_pmu;
                break;
        case PV_POWER5:
                ppmu = &power5_pmu;
                break;
        case PV_POWER5p:
                ppmu = &power5p_pmu;
                break;
        case 0x3e:      /* POWER6 */
                ppmu = &power6_pmu;
                break;
        }

        /*
         * Use FCHV to ignore kernel events if MSR.HV is set.
         */
        if (mfmsr() & MSR_HV)
                freeze_counters_kernel = MMCR0_FCHV;

        return 0;
}

arch_initcall(init_perf_counters);