core-book3s.c

/*
 * Performance event support - powerpc architecture code
 *
 * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/reg.h>
#include <asm/pmc.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>
struct cpu_hw_events {
	int n_events;
	int n_percpu;
	int disabled;
	int n_added;
	int n_limited;
	u8 pmcs_enabled;
	struct perf_event *event[MAX_HWEVENTS];
	u64 events[MAX_HWEVENTS];
	unsigned int flags[MAX_HWEVENTS];
	unsigned long mmcr[3];
	struct perf_event *limited_counter[MAX_LIMITED_HWCOUNTERS];
	u8 limited_hwidx[MAX_LIMITED_HWCOUNTERS];
	u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
	unsigned long amasks[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
	unsigned long avalues[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];

	unsigned int group_flag;
	int n_txn_start;
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

struct power_pmu *ppmu;
/*
 * Normally, to ignore kernel events we set the FCS (freeze counters
 * in supervisor mode) bit in MMCR0, but if the kernel runs with the
 * hypervisor bit set in the MSR, or if we are running on a processor
 * where the hypervisor bit is forced to 1 (as on Apple G5 processors),
 * then we need to use the FCHV bit to ignore kernel events.
 */
static unsigned int freeze_events_kernel = MMCR0_FCS;

/*
 * 32-bit doesn't have MMCRA but does have an MMCR2,
 * and a few other names are different.
 */
#ifdef CONFIG_PPC32

#define MMCR0_FCHV		0
#define MMCR0_PMCjCE		MMCR0_PMCnCE
#define SPRN_MMCRA		SPRN_MMCR2
#define MMCRA_SAMPLE_ENABLE	0

static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
{
	return 0;
}
static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) { }
static inline u32 perf_get_misc_flags(struct pt_regs *regs)
{
	return 0;
}
static inline void perf_read_regs(struct pt_regs *regs)
{
	regs->result = 0;
}
static inline int perf_intr_is_nmi(struct pt_regs *regs)
{
	return 0;
}

static inline int siar_valid(struct pt_regs *regs)
{
	return 1;
}

#endif /* CONFIG_PPC32 */
/*
 * Things that are specific to 64-bit implementations.
 */
#ifdef CONFIG_PPC64

static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
{
	unsigned long mmcra = regs->dsisr;

	if ((mmcra & MMCRA_SAMPLE_ENABLE) && !(ppmu->flags & PPMU_ALT_SIPR)) {
		unsigned long slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT;
		if (slot > 1)
			return 4 * (slot - 1);
	}
	return 0;
}

/*
 * The user wants a data address recorded.
 * If we're not doing instruction sampling, give them the SDAR
 * (sampled data address).  If we are doing instruction sampling, then
 * only give them the SDAR if it corresponds to the instruction
 * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC or
 * the [POWER7P_]MMCRA_SDAR_VALID bit in MMCRA.
 */
static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
{
	unsigned long mmcra = regs->dsisr;
	unsigned long sdsync;

	if (ppmu->flags & PPMU_SIAR_VALID)
		sdsync = POWER7P_MMCRA_SDAR_VALID;
	else if (ppmu->flags & PPMU_ALT_SIPR)
		sdsync = POWER6_MMCRA_SDSYNC;
	else
		sdsync = MMCRA_SDSYNC;

	if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
		*addrp = mfspr(SPRN_SDAR);
}
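/*
 * Test the "sampled instruction in hypervisor mode" / "sampled instruction
 * in problem (user) mode" bits in MMCRA, using the alternate bit positions
 * on PMUs flagged with PPMU_ALT_SIPR (POWER6-family).
 */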
static bool mmcra_sihv(unsigned long mmcra)
{
	unsigned long sihv = MMCRA_SIHV;

	if (ppmu->flags & PPMU_ALT_SIPR)
		sihv = POWER6_MMCRA_SIHV;

	return !!(mmcra & sihv);
}

static bool mmcra_sipr(unsigned long mmcra)
{
	unsigned long sipr = MMCRA_SIPR;

	if (ppmu->flags & PPMU_ALT_SIPR)
		sipr = POWER6_MMCRA_SIPR;

	return !!(mmcra & sipr);
}
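/*
 * Derive the processor mode (user/kernel/hypervisor) of the sample from
 * the interrupted MSR, for use when the SIAR-based flags are unavailable.
 */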
static inline u32 perf_flags_from_msr(struct pt_regs *regs)
{
	if (regs->msr & MSR_PR)
		return PERF_RECORD_MISC_USER;
	if ((regs->msr & MSR_HV) && freeze_events_kernel != MMCR0_FCHV)
		return PERF_RECORD_MISC_HYPERVISOR;
	return PERF_RECORD_MISC_KERNEL;
}

static inline u32 perf_get_misc_flags(struct pt_regs *regs)
{
	unsigned long mmcra = regs->dsisr;
	unsigned long use_siar = regs->result;

	if (!use_siar)
		return perf_flags_from_msr(regs);

	/*
	 * If we don't have flags in MMCRA, rather than using
	 * the MSR, we intuit the flags from the address in
	 * SIAR which should give slightly more reliable
	 * results
	 */
	if (ppmu->flags & PPMU_NO_SIPR) {
		unsigned long siar = mfspr(SPRN_SIAR);
		if (siar >= PAGE_OFFSET)
			return PERF_RECORD_MISC_KERNEL;
		return PERF_RECORD_MISC_USER;
	}

	/* PR has priority over HV, so order below is important */
	if (mmcra_sipr(mmcra))
		return PERF_RECORD_MISC_USER;
	if (mmcra_sihv(mmcra) && (freeze_events_kernel != MMCR0_FCHV))
		return PERF_RECORD_MISC_HYPERVISOR;
	return PERF_RECORD_MISC_KERNEL;
}

/*
 * Overload regs->dsisr to store MMCRA so we only need to read it once
 * on each interrupt.
 * Overload regs->result to specify whether we should use the MSR (result
 * is zero) or the SIAR (result is non zero).
 */
static inline void perf_read_regs(struct pt_regs *regs)
{
	unsigned long mmcra = mfspr(SPRN_MMCRA);
	int marked = mmcra & MMCRA_SAMPLE_ENABLE;
	int use_siar;

	/*
	 * If this isn't a PMU exception (eg a software event) the SIAR is
	 * not valid. Use pt_regs.
	 *
	 * If it is a marked event use the SIAR.
	 *
	 * If the PMU doesn't update the SIAR for non marked events use
	 * pt_regs.
	 *
	 * If the PMU has HV/PR flags then check to see if they
	 * place the exception in userspace. If so, use pt_regs. In
	 * continuous sampling mode the SIAR and the PMU exception are
	 * not synchronised, so they may be many instructions apart.
	 * This can result in confusing backtraces. We still want
	 * hypervisor samples as well as samples in the kernel with
	 * interrupts off hence the userspace check.
	 */
	if (TRAP(regs) != 0xf00)
		use_siar = 0;
	else if (marked)
		use_siar = 1;
	else if ((ppmu->flags & PPMU_NO_CONT_SAMPLING))
		use_siar = 0;
	else if (!(ppmu->flags & PPMU_NO_SIPR) && mmcra_sipr(mmcra))
		use_siar = 0;
	else
		use_siar = 1;

	regs->dsisr = mmcra;
	regs->result = use_siar;
}

/*
 * If interrupts were soft-disabled when a PMU interrupt occurs, treat
 * it as an NMI.
 */
static inline int perf_intr_is_nmi(struct pt_regs *regs)
{
	return !regs->softe;
}

/*
 * On processors like P7+ that have the SIAR-Valid bit, marked instructions
 * must be sampled only if the SIAR-valid bit is set.
 *
 * For unmarked instructions and for processors that don't have the SIAR-Valid
 * bit, assume that SIAR is valid.
 */
static inline int siar_valid(struct pt_regs *regs)
{
	unsigned long mmcra = regs->dsisr;
	int marked = mmcra & MMCRA_SAMPLE_ENABLE;

	if ((ppmu->flags & PPMU_SIAR_VALID) && marked)
		return mmcra & POWER7P_MMCRA_SIAR_VALID;

	return 1;
}

#endif /* CONFIG_PPC64 */
static void perf_event_interrupt(struct pt_regs *regs);

void perf_event_print_debug(void)
{
}

/*
 * Read one performance monitor counter (PMC).
 */
static unsigned long read_pmc(int idx)
{
	unsigned long val;

	switch (idx) {
	case 1:
		val = mfspr(SPRN_PMC1);
		break;
	case 2:
		val = mfspr(SPRN_PMC2);
		break;
	case 3:
		val = mfspr(SPRN_PMC3);
		break;
	case 4:
		val = mfspr(SPRN_PMC4);
		break;
	case 5:
		val = mfspr(SPRN_PMC5);
		break;
	case 6:
		val = mfspr(SPRN_PMC6);
		break;
#ifdef CONFIG_PPC64
	case 7:
		val = mfspr(SPRN_PMC7);
		break;
	case 8:
		val = mfspr(SPRN_PMC8);
		break;
#endif /* CONFIG_PPC64 */
	default:
		printk(KERN_ERR "oops trying to read PMC%d\n", idx);
		val = 0;
	}
	return val;
}

/*
 * Write one PMC.
 */
static void write_pmc(int idx, unsigned long val)
{
	switch (idx) {
	case 1:
		mtspr(SPRN_PMC1, val);
		break;
	case 2:
		mtspr(SPRN_PMC2, val);
		break;
	case 3:
		mtspr(SPRN_PMC3, val);
		break;
	case 4:
		mtspr(SPRN_PMC4, val);
		break;
	case 5:
		mtspr(SPRN_PMC5, val);
		break;
	case 6:
		mtspr(SPRN_PMC6, val);
		break;
#ifdef CONFIG_PPC64
	case 7:
		mtspr(SPRN_PMC7, val);
		break;
	case 8:
		mtspr(SPRN_PMC8, val);
		break;
#endif /* CONFIG_PPC64 */
	default:
		printk(KERN_ERR "oops trying to write PMC%d\n", idx);
	}
}
/*
 * Check if a set of events can all go on the PMU at once.
 * If they can't, this will look at alternative codes for the events
 * and see if any combination of alternative codes is feasible.
 * The feasible set is returned in event_id[].
 */
static int power_check_constraints(struct cpu_hw_events *cpuhw,
				   u64 event_id[], unsigned int cflags[],
				   int n_ev)
{
	unsigned long mask, value, nv;
	unsigned long smasks[MAX_HWEVENTS], svalues[MAX_HWEVENTS];
	int n_alt[MAX_HWEVENTS], choice[MAX_HWEVENTS];
	int i, j;
	unsigned long addf = ppmu->add_fields;
	unsigned long tadd = ppmu->test_adder;

	if (n_ev > ppmu->n_counter)
		return -1;

	/* First see if the events will go on as-is */
	for (i = 0; i < n_ev; ++i) {
		if ((cflags[i] & PPMU_LIMITED_PMC_REQD)
		    && !ppmu->limited_pmc_event(event_id[i])) {
			ppmu->get_alternatives(event_id[i], cflags[i],
					       cpuhw->alternatives[i]);
			event_id[i] = cpuhw->alternatives[i][0];
		}
		if (ppmu->get_constraint(event_id[i], &cpuhw->amasks[i][0],
					 &cpuhw->avalues[i][0]))
			return -1;
	}
	value = mask = 0;
	for (i = 0; i < n_ev; ++i) {
		nv = (value | cpuhw->avalues[i][0]) +
			(value & cpuhw->avalues[i][0] & addf);
		if ((((nv + tadd) ^ value) & mask) != 0 ||
		    (((nv + tadd) ^ cpuhw->avalues[i][0]) &
		     cpuhw->amasks[i][0]) != 0)
			break;
		value = nv;
		mask |= cpuhw->amasks[i][0];
	}
	if (i == n_ev)
		return 0;	/* all OK */

	/* doesn't work, gather alternatives... */
	if (!ppmu->get_alternatives)
		return -1;
	for (i = 0; i < n_ev; ++i) {
		choice[i] = 0;
		n_alt[i] = ppmu->get_alternatives(event_id[i], cflags[i],
						  cpuhw->alternatives[i]);
		for (j = 1; j < n_alt[i]; ++j)
			ppmu->get_constraint(cpuhw->alternatives[i][j],
					     &cpuhw->amasks[i][j],
					     &cpuhw->avalues[i][j]);
	}

	/* enumerate all possibilities and see if any will work */
	i = 0;
	j = -1;
	value = mask = nv = 0;
	while (i < n_ev) {
		if (j >= 0) {
			/* we're backtracking, restore context */
			value = svalues[i];
			mask = smasks[i];
			j = choice[i];
		}
		/*
		 * See if any alternative k for event_id i,
		 * where k > j, will satisfy the constraints.
		 */
		while (++j < n_alt[i]) {
			nv = (value | cpuhw->avalues[i][j]) +
				(value & cpuhw->avalues[i][j] & addf);
			if ((((nv + tadd) ^ value) & mask) == 0 &&
			    (((nv + tadd) ^ cpuhw->avalues[i][j])
			     & cpuhw->amasks[i][j]) == 0)
				break;
		}
		if (j >= n_alt[i]) {
			/*
			 * No feasible alternative, backtrack
			 * to event_id i-1 and continue enumerating its
			 * alternatives from where we got up to.
			 */
			if (--i < 0)
				return -1;
		} else {
			/*
			 * Found a feasible alternative for event_id i,
			 * remember where we got up to with this event_id,
			 * go on to the next event_id, and start with
			 * the first alternative for it.
			 */
			choice[i] = j;
			svalues[i] = value;
			smasks[i] = mask;
			value = nv;
			mask |= cpuhw->amasks[i][j];
			++i;
			j = -1;
		}
	}

	/* OK, we have a feasible combination, tell the caller the solution */
	for (i = 0; i < n_ev; ++i)
		event_id[i] = cpuhw->alternatives[i][choice[i]];
	return 0;
}
/*
 * Check if newly-added events have consistent settings for
 * exclude_{user,kernel,hv} with each other and any previously
 * added events.
 */
static int check_excludes(struct perf_event **ctrs, unsigned int cflags[],
			  int n_prev, int n_new)
{
	int eu = 0, ek = 0, eh = 0;
	int i, n, first;
	struct perf_event *event;

	n = n_prev + n_new;
	if (n <= 1)
		return 0;

	first = 1;
	for (i = 0; i < n; ++i) {
		if (cflags[i] & PPMU_LIMITED_PMC_OK) {
			cflags[i] &= ~PPMU_LIMITED_PMC_REQD;
			continue;
		}
		event = ctrs[i];
		if (first) {
			eu = event->attr.exclude_user;
			ek = event->attr.exclude_kernel;
			eh = event->attr.exclude_hv;
			first = 0;
		} else if (event->attr.exclude_user != eu ||
			   event->attr.exclude_kernel != ek ||
			   event->attr.exclude_hv != eh) {
			return -EAGAIN;
		}
	}

	if (eu || ek || eh)
		for (i = 0; i < n; ++i)
			if (cflags[i] & PPMU_LIMITED_PMC_OK)
				cflags[i] |= PPMU_LIMITED_PMC_REQD;

	return 0;
}
static u64 check_and_compute_delta(u64 prev, u64 val)
{
	u64 delta = (val - prev) & 0xfffffffful;

	/*
	 * POWER7 can roll back counter values, if the new value is smaller
	 * than the previous value it will cause the delta and the counter to
	 * have bogus values unless we rolled a counter over.  If a counter is
	 * rolled back, it will be smaller, but within 256, which is the
	 * maximum number of events to roll back at once.  If we detect a
	 * rollback return 0.  This can lead to a small lack of precision in
	 * the counters.
	 */
	if (prev > val && (prev - val) < 256)
		delta = 0;

	return delta;
}
static void power_pmu_read(struct perf_event *event)
{
	s64 val, delta, prev;

	if (event->hw.state & PERF_HES_STOPPED)
		return;

	if (!event->hw.idx)
		return;
	/*
	 * Performance monitor interrupts come even when interrupts
	 * are soft-disabled, as long as interrupts are hard-enabled.
	 * Therefore we treat them like NMIs.
	 */
	do {
		prev = local64_read(&event->hw.prev_count);
		barrier();
		val = read_pmc(event->hw.idx);
		delta = check_and_compute_delta(prev, val);
		if (!delta)
			return;
	} while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);

	local64_add(delta, &event->count);
	local64_sub(delta, &event->hw.period_left);
}

/*
 * On some machines, PMC5 and PMC6 can't be written, don't respect
 * the freeze conditions, and don't generate interrupts.  This tells
 * us if `event' is using such a PMC.
 */
static int is_limited_pmc(int pmcnum)
{
	return (ppmu->flags & PPMU_LIMITED_PMC5_6)
		&& (pmcnum == 5 || pmcnum == 6);
}
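/*
 * Fold the current PMC5/PMC6 values (read and passed in by write_mmcr0())
 * into each limited event's count and take the event off its counter,
 * since the hardware will keep counting while the rest of the PMU is frozen.
 */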
static void freeze_limited_counters(struct cpu_hw_events *cpuhw,
				    unsigned long pmc5, unsigned long pmc6)
{
	struct perf_event *event;
	u64 val, prev, delta;
	int i;

	for (i = 0; i < cpuhw->n_limited; ++i) {
		event = cpuhw->limited_counter[i];
		if (!event->hw.idx)
			continue;
		val = (event->hw.idx == 5) ? pmc5 : pmc6;
		prev = local64_read(&event->hw.prev_count);
		event->hw.idx = 0;
		delta = check_and_compute_delta(prev, val);
		if (delta)
			local64_add(delta, &event->count);
	}
}

static void thaw_limited_counters(struct cpu_hw_events *cpuhw,
				  unsigned long pmc5, unsigned long pmc6)
{
	struct perf_event *event;
	u64 val, prev;
	int i;

	for (i = 0; i < cpuhw->n_limited; ++i) {
		event = cpuhw->limited_counter[i];
		event->hw.idx = cpuhw->limited_hwidx[i];
		val = (event->hw.idx == 5) ? pmc5 : pmc6;
		prev = local64_read(&event->hw.prev_count);
		if (check_and_compute_delta(prev, val))
			local64_set(&event->hw.prev_count, val);
		perf_event_update_userpage(event);
	}
}

/*
 * Since limited events don't respect the freeze conditions, we
 * have to read them immediately after freezing or unfreezing the
 * other events.  We try to keep the values from the limited
 * events as consistent as possible by keeping the delay (in
 * cycles and instructions) between freezing/unfreezing and reading
 * the limited events as small and consistent as possible.
 * Therefore, if any limited events are in use, we read them
 * both, and always in the same order, to minimize variability,
 * and do it inside the same asm that writes MMCR0.
 */
static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0)
{
	unsigned long pmc5, pmc6;

	if (!cpuhw->n_limited) {
		mtspr(SPRN_MMCR0, mmcr0);
		return;
	}

	/*
	 * Write MMCR0, then read PMC5 and PMC6 immediately.
	 * To ensure we don't get a performance monitor interrupt
	 * between writing MMCR0 and freezing/thawing the limited
	 * events, we first write MMCR0 with the event overflow
	 * interrupt enable bits turned off.
	 */
	asm volatile("mtspr %3,%2; mfspr %0,%4; mfspr %1,%5"
		     : "=&r" (pmc5), "=&r" (pmc6)
		     : "r" (mmcr0 & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)),
		       "i" (SPRN_MMCR0),
		       "i" (SPRN_PMC5), "i" (SPRN_PMC6));

	if (mmcr0 & MMCR0_FC)
		freeze_limited_counters(cpuhw, pmc5, pmc6);
	else
		thaw_limited_counters(cpuhw, pmc5, pmc6);

	/*
	 * Write the full MMCR0 including the event overflow interrupt
	 * enable bits, if necessary.
	 */
	if (mmcr0 & (MMCR0_PMC1CE | MMCR0_PMCjCE))
		mtspr(SPRN_MMCR0, mmcr0);
}
/*
 * Disable all events to prevent PMU interrupts and to allow
 * events to be added or removed.
 */
static void power_pmu_disable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw;
	unsigned long flags;

	if (!ppmu)
		return;
	local_irq_save(flags);
	cpuhw = &__get_cpu_var(cpu_hw_events);

	if (!cpuhw->disabled) {
		cpuhw->disabled = 1;
		cpuhw->n_added = 0;

		/*
		 * Check if we ever enabled the PMU on this cpu.
		 */
		if (!cpuhw->pmcs_enabled) {
			ppc_enable_pmcs();
			cpuhw->pmcs_enabled = 1;
		}

		/*
		 * Disable instruction sampling if it was enabled
		 */
		if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
			mtspr(SPRN_MMCRA,
			      cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
			mb();
		}

		/*
		 * Set the 'freeze counters' bit.
		 * The barrier is to make sure the mtspr has been
		 * executed and the PMU has frozen the events
		 * before we return.
		 */
		write_mmcr0(cpuhw, mfspr(SPRN_MMCR0) | MMCR0_FC);
		mb();
	}
	local_irq_restore(flags);
}
/*
 * Re-enable all events if disable == 0.
 * If we were previously disabled and events were added, then
 * put the new config on the PMU.
 */
static void power_pmu_enable(struct pmu *pmu)
{
	struct perf_event *event;
	struct cpu_hw_events *cpuhw;
	unsigned long flags;
	long i;
	unsigned long val;
	s64 left;
	unsigned int hwc_index[MAX_HWEVENTS];
	int n_lim;
	int idx;

	if (!ppmu)
		return;
	local_irq_save(flags);
	cpuhw = &__get_cpu_var(cpu_hw_events);
	if (!cpuhw->disabled) {
		local_irq_restore(flags);
		return;
	}
	cpuhw->disabled = 0;

	/*
	 * If we didn't change anything, or only removed events,
	 * no need to recalculate MMCR* settings and reset the PMCs.
	 * Just reenable the PMU with the current MMCR* settings
	 * (possibly updated for removal of events).
	 */
	if (!cpuhw->n_added) {
		mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
		mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
		if (cpuhw->n_events == 0)
			ppc_set_pmu_inuse(0);
		goto out_enable;
	}

	/*
	 * Compute MMCR* values for the new set of events
	 */
	if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_events, hwc_index,
			       cpuhw->mmcr)) {
		/* shouldn't ever get here */
		printk(KERN_ERR "oops compute_mmcr failed\n");
		goto out;
	}

	/*
	 * Add in MMCR0 freeze bits corresponding to the
	 * attr.exclude_* bits for the first event.
	 * We have already checked that all events have the
	 * same values for these bits as the first event.
	 */
	event = cpuhw->event[0];
	if (event->attr.exclude_user)
		cpuhw->mmcr[0] |= MMCR0_FCP;
	if (event->attr.exclude_kernel)
		cpuhw->mmcr[0] |= freeze_events_kernel;
	if (event->attr.exclude_hv)
		cpuhw->mmcr[0] |= MMCR0_FCHV;

	/*
	 * Write the new configuration to MMCR* with the freeze
	 * bit set and set the hardware events to their initial values.
	 * Then unfreeze the events.
	 */
	ppc_set_pmu_inuse(1);
	mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
	mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
	mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
				| MMCR0_FC);

	/*
	 * Read off any pre-existing events that need to move
	 * to another PMC.
	 */
	for (i = 0; i < cpuhw->n_events; ++i) {
		event = cpuhw->event[i];
		if (event->hw.idx && event->hw.idx != hwc_index[i] + 1) {
			power_pmu_read(event);
			write_pmc(event->hw.idx, 0);
			event->hw.idx = 0;
		}
	}

	/*
	 * Initialize the PMCs for all the new and moved events.
	 */
	cpuhw->n_limited = n_lim = 0;
	for (i = 0; i < cpuhw->n_events; ++i) {
		event = cpuhw->event[i];
		if (event->hw.idx)
			continue;
		idx = hwc_index[i] + 1;
		if (is_limited_pmc(idx)) {
			cpuhw->limited_counter[n_lim] = event;
			cpuhw->limited_hwidx[n_lim] = idx;
			++n_lim;
			continue;
		}
		val = 0;
		if (event->hw.sample_period) {
			left = local64_read(&event->hw.period_left);
			if (left < 0x80000000L)
				val = 0x80000000L - left;
		}
		local64_set(&event->hw.prev_count, val);
		event->hw.idx = idx;
		if (event->hw.state & PERF_HES_STOPPED)
			val = 0;
		write_pmc(idx, val);
		perf_event_update_userpage(event);
	}
	cpuhw->n_limited = n_lim;
	cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE;

 out_enable:
	mb();
	write_mmcr0(cpuhw, cpuhw->mmcr[0]);

	/*
	 * Enable instruction sampling if necessary
	 */
	if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
		mb();
		mtspr(SPRN_MMCRA, cpuhw->mmcr[2]);
	}

 out:
	local_irq_restore(flags);
}
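/*
 * Collect the group leader (unless it is a software event) and its sibling
 * hardware events into ctrs[]/events[]/flags[], skipping siblings that are
 * switched off.  Returns the number collected, or -1 if it exceeds max_count.
 */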
static int collect_events(struct perf_event *group, int max_count,
			  struct perf_event *ctrs[], u64 *events,
			  unsigned int *flags)
{
	int n = 0;
	struct perf_event *event;

	if (!is_software_event(group)) {
		if (n >= max_count)
			return -1;
		ctrs[n] = group;
		flags[n] = group->hw.event_base;
		events[n++] = group->hw.config;
	}
	list_for_each_entry(event, &group->sibling_list, group_entry) {
		if (!is_software_event(event) &&
		    event->state != PERF_EVENT_STATE_OFF) {
			if (n >= max_count)
				return -1;
			ctrs[n] = event;
			flags[n] = event->hw.event_base;
			events[n++] = event->hw.config;
		}
	}
	return n;
}
/*
 * Add an event to the PMU.
 * If all events are not already frozen, then we disable and
 * re-enable the PMU in order to get hw_perf_enable to do the
 * actual work of reconfiguring the PMU.
 */
static int power_pmu_add(struct perf_event *event, int ef_flags)
{
	struct cpu_hw_events *cpuhw;
	unsigned long flags;
	int n0;
	int ret = -EAGAIN;

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	/*
	 * Add the event to the list (if there is room)
	 * and check whether the total set is still feasible.
	 */
	cpuhw = &__get_cpu_var(cpu_hw_events);
	n0 = cpuhw->n_events;
	if (n0 >= ppmu->n_counter)
		goto out;
	cpuhw->event[n0] = event;
	cpuhw->events[n0] = event->hw.config;
	cpuhw->flags[n0] = event->hw.event_base;

	/*
	 * This event may have been disabled/stopped in record_and_restart()
	 * because we exceeded the ->event_limit. If re-starting the event,
	 * clear the ->hw.state (STOPPED and UPTODATE flags), so the user
	 * notification is re-enabled.
	 */
	if (!(ef_flags & PERF_EF_START))
		event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	else
		event->hw.state = 0;

	/*
	 * If group events scheduling transaction was started,
	 * skip the schedulability test here, it will be performed
	 * at commit time (->commit_txn) as a whole
	 */
	if (cpuhw->group_flag & PERF_EVENT_TXN)
		goto nocheck;

	if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1))
		goto out;
	if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1))
		goto out;
	event->hw.config = cpuhw->events[n0];

nocheck:
	++cpuhw->n_events;
	++cpuhw->n_added;

	ret = 0;
 out:
	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
	return ret;
}
/*
 * Remove an event from the PMU.
 */
static void power_pmu_del(struct perf_event *event, int ef_flags)
{
	struct cpu_hw_events *cpuhw;
	long i;
	unsigned long flags;

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	power_pmu_read(event);

	cpuhw = &__get_cpu_var(cpu_hw_events);
	for (i = 0; i < cpuhw->n_events; ++i) {
		if (event == cpuhw->event[i]) {
			while (++i < cpuhw->n_events) {
				cpuhw->event[i-1] = cpuhw->event[i];
				cpuhw->events[i-1] = cpuhw->events[i];
				cpuhw->flags[i-1] = cpuhw->flags[i];
			}
			--cpuhw->n_events;
			ppmu->disable_pmc(event->hw.idx - 1, cpuhw->mmcr);
			if (event->hw.idx) {
				write_pmc(event->hw.idx, 0);
				event->hw.idx = 0;
			}
			perf_event_update_userpage(event);
			break;
		}
	}
	for (i = 0; i < cpuhw->n_limited; ++i)
		if (event == cpuhw->limited_counter[i])
			break;
	if (i < cpuhw->n_limited) {
		while (++i < cpuhw->n_limited) {
			cpuhw->limited_counter[i-1] = cpuhw->limited_counter[i];
			cpuhw->limited_hwidx[i-1] = cpuhw->limited_hwidx[i];
		}
		--cpuhw->n_limited;
	}
	if (cpuhw->n_events == 0) {
		/* disable exceptions if no events are running */
		cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
	}

	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
}
/*
 * POWER-PMU does not support disabling individual counters, hence
 * program their cycle counter to their max value and ignore the interrupts.
 */
static void power_pmu_start(struct perf_event *event, int ef_flags)
{
	unsigned long flags;
	s64 left;
	unsigned long val;

	if (!event->hw.idx || !event->hw.sample_period)
		return;

	if (!(event->hw.state & PERF_HES_STOPPED))
		return;

	if (ef_flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	event->hw.state = 0;
	left = local64_read(&event->hw.period_left);

	val = 0;
	if (left < 0x80000000L)
		val = 0x80000000L - left;

	write_pmc(event->hw.idx, val);

	perf_event_update_userpage(event);
	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
}
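/*
 * Stop counting: fold the current hardware count into the event, mark it
 * stopped and up to date, and zero the PMC so it can't overflow meanwhile.
 */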
static void power_pmu_stop(struct perf_event *event, int ef_flags)
{
	unsigned long flags;

	if (!event->hw.idx || !event->hw.sample_period)
		return;

	if (event->hw.state & PERF_HES_STOPPED)
		return;

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	power_pmu_read(event);
	event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	write_pmc(event->hw.idx, 0);

	perf_event_update_userpage(event);
	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
}
/*
 * Start group events scheduling transaction
 * Set the flag to make pmu::enable() not perform the
 * schedulability test, it will be performed at commit time
 */
void power_pmu_start_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	perf_pmu_disable(pmu);
	cpuhw->group_flag |= PERF_EVENT_TXN;
	cpuhw->n_txn_start = cpuhw->n_events;
}

/*
 * Stop group events scheduling transaction
 * Clear the flag and pmu::enable() will perform the
 * schedulability test.
 */
void power_pmu_cancel_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	cpuhw->group_flag &= ~PERF_EVENT_TXN;
	perf_pmu_enable(pmu);
}

/*
 * Commit group events scheduling transaction
 * Perform the group schedulability test as a whole
 * Return 0 if success
 */
int power_pmu_commit_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw;
	long i, n;

	if (!ppmu)
		return -EAGAIN;
	cpuhw = &__get_cpu_var(cpu_hw_events);
	n = cpuhw->n_events;
	if (check_excludes(cpuhw->event, cpuhw->flags, 0, n))
		return -EAGAIN;
	i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n);
	if (i < 0)
		return -EAGAIN;

	for (i = cpuhw->n_txn_start; i < n; ++i)
		cpuhw->event[i]->hw.config = cpuhw->events[i];

	cpuhw->group_flag &= ~PERF_EVENT_TXN;
	perf_pmu_enable(pmu);
	return 0;
}
/*
 * Return 1 if we might be able to put event on a limited PMC,
 * or 0 if not.
 * An event can only go on a limited PMC if it counts something
 * that a limited PMC can count, doesn't require interrupts, and
 * doesn't exclude any processor mode.
 */
static int can_go_on_limited_pmc(struct perf_event *event, u64 ev,
				 unsigned int flags)
{
	int n;
	u64 alt[MAX_EVENT_ALTERNATIVES];

	if (event->attr.exclude_user
	    || event->attr.exclude_kernel
	    || event->attr.exclude_hv
	    || event->attr.sample_period)
		return 0;

	if (ppmu->limited_pmc_event(ev))
		return 1;

	/*
	 * The requested event_id isn't on a limited PMC already;
	 * see if any alternative code goes on a limited PMC.
	 */
	if (!ppmu->get_alternatives)
		return 0;

	flags |= PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD;
	n = ppmu->get_alternatives(ev, flags, alt);

	return n > 0;
}

/*
 * Find an alternative event_id that goes on a normal PMC, if possible,
 * and return the event_id code, or 0 if there is no such alternative.
 * (Note: event_id code 0 is "don't count" on all machines.)
 */
static u64 normal_pmc_alternative(u64 ev, unsigned long flags)
{
	u64 alt[MAX_EVENT_ALTERNATIVES];
	int n;

	flags &= ~(PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD);
	n = ppmu->get_alternatives(ev, flags, alt);
	if (!n)
		return 0;
	return alt[0];
}
/* Number of perf_events counting hardware events */
static atomic_t num_events;
/* Used to avoid races in calling reserve/release_pmc_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);

/*
 * Release the PMU if this is the last perf_event.
 */
static void hw_perf_event_destroy(struct perf_event *event)
{
	if (!atomic_add_unless(&num_events, -1, 1)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_dec_return(&num_events) == 0)
			release_pmc_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

/*
 * Translate a generic cache event_id config to a raw event_id code.
 */
static int hw_perf_cache_event(u64 config, u64 *eventp)
{
	unsigned long type, op, result;
	int ev;

	if (!ppmu->cache_events)
		return -EINVAL;

	/* unpack config */
	type = config & 0xff;
	op = (config >> 8) & 0xff;
	result = (config >> 16) & 0xff;

	if (type >= PERF_COUNT_HW_CACHE_MAX ||
	    op >= PERF_COUNT_HW_CACHE_OP_MAX ||
	    result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ev = (*ppmu->cache_events)[type][op][result];
	if (ev == 0)
		return -EOPNOTSUPP;
	if (ev == -1)
		return -EINVAL;
	*eventp = ev;
	return 0;
}
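/*
 * Set up a new event: translate the generic or cache config to a raw event
 * code, check it against the rest of its group, and reserve the PMC
 * hardware if this is the first active event.
 */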
static int power_pmu_event_init(struct perf_event *event)
{
	u64 ev;
	unsigned long flags;
	struct perf_event *ctrs[MAX_HWEVENTS];
	u64 events[MAX_HWEVENTS];
	unsigned int cflags[MAX_HWEVENTS];
	int n;
	int err;
	struct cpu_hw_events *cpuhw;

	if (!ppmu)
		return -ENOENT;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		ev = event->attr.config;
		if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
			return -EOPNOTSUPP;
		ev = ppmu->generic_events[ev];
		break;
	case PERF_TYPE_HW_CACHE:
		err = hw_perf_cache_event(event->attr.config, &ev);
		if (err)
			return err;
		break;
	case PERF_TYPE_RAW:
		ev = event->attr.config;
		break;
	default:
		return -ENOENT;
	}

	event->hw.config_base = ev;
	event->hw.idx = 0;

	/*
	 * If we are not running on a hypervisor, force the
	 * exclude_hv bit to 0 so that we don't care what
	 * the user set it to.
	 */
	if (!firmware_has_feature(FW_FEATURE_LPAR))
		event->attr.exclude_hv = 0;

	/*
	 * If this is a per-task event, then we can use
	 * PM_RUN_* events interchangeably with their non RUN_*
	 * equivalents, e.g. PM_RUN_CYC instead of PM_CYC.
	 * XXX we should check if the task is an idle task.
	 */
	flags = 0;
	if (event->attach_state & PERF_ATTACH_TASK)
		flags |= PPMU_ONLY_COUNT_RUN;

	/*
	 * If this machine has limited events, check whether this
	 * event_id could go on a limited event.
	 */
	if (ppmu->flags & PPMU_LIMITED_PMC5_6) {
		if (can_go_on_limited_pmc(event, ev, flags)) {
			flags |= PPMU_LIMITED_PMC_OK;
		} else if (ppmu->limited_pmc_event(ev)) {
			/*
			 * The requested event_id is on a limited PMC,
			 * but we can't use a limited PMC; see if any
			 * alternative goes on a normal PMC.
			 */
			ev = normal_pmc_alternative(ev, flags);
			if (!ev)
				return -EINVAL;
		}
	}

	/*
	 * If this is in a group, check if it can go on with all the
	 * other hardware events in the group.  We assume the event
	 * hasn't been linked into its leader's sibling list at this point.
	 */
	n = 0;
	if (event->group_leader != event) {
		n = collect_events(event->group_leader, ppmu->n_counter - 1,
				   ctrs, events, cflags);
		if (n < 0)
			return -EINVAL;
	}
	events[n] = ev;
	ctrs[n] = event;
	cflags[n] = flags;
	if (check_excludes(ctrs, cflags, n, 1))
		return -EINVAL;

	cpuhw = &get_cpu_var(cpu_hw_events);
	err = power_check_constraints(cpuhw, events, cflags, n + 1);
	put_cpu_var(cpu_hw_events);
	if (err)
		return -EINVAL;

	event->hw.config = events[n];
	event->hw.event_base = cflags[n];
	event->hw.last_period = event->hw.sample_period;
	local64_set(&event->hw.period_left, event->hw.last_period);

	/*
	 * See if we need to reserve the PMU.
	 * If no events are currently in use, then we have to take a
	 * mutex to ensure that we don't race with another task doing
	 * reserve_pmc_hardware or release_pmc_hardware.
	 */
	err = 0;
	if (!atomic_inc_not_zero(&num_events)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&num_events) == 0 &&
		    reserve_pmc_hardware(perf_event_interrupt))
			err = -EBUSY;
		else
			atomic_inc(&num_events);
		mutex_unlock(&pmc_reserve_mutex);
	}
	event->destroy = hw_perf_event_destroy;

	return err;
}
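/*
 * Report the counter index the event is currently using: the 1-based PMC
 * number, or 0 if the event is not on a counter.
 */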
static int power_pmu_event_idx(struct perf_event *event)
{
	return event->hw.idx;
}

ssize_t power_events_sysfs_show(struct device *dev,
				struct device_attribute *attr, char *page)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);

	return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
}

struct pmu power_pmu = {
	.pmu_enable	= power_pmu_enable,
	.pmu_disable	= power_pmu_disable,
	.event_init	= power_pmu_event_init,
	.add		= power_pmu_add,
	.del		= power_pmu_del,
	.start		= power_pmu_start,
	.stop		= power_pmu_stop,
	.read		= power_pmu_read,
	.start_txn	= power_pmu_start_txn,
	.cancel_txn	= power_pmu_cancel_txn,
	.commit_txn	= power_pmu_commit_txn,
	.event_idx	= power_pmu_event_idx,
};
/*
 * A counter has overflowed; update its count and record
 * things if requested.  Note that interrupts are hard-disabled
 * here so there is no possibility of being interrupted.
 */
static void record_and_restart(struct perf_event *event, unsigned long val,
			       struct pt_regs *regs)
{
	u64 period = event->hw.sample_period;
	s64 prev, delta, left;
	int record = 0;

	if (event->hw.state & PERF_HES_STOPPED) {
		write_pmc(event->hw.idx, 0);
		return;
	}

	/* we don't have to worry about interrupts here */
	prev = local64_read(&event->hw.prev_count);
	delta = check_and_compute_delta(prev, val);
	local64_add(delta, &event->count);

	/*
	 * See if the total period for this event has expired,
	 * and update for the next period.
	 */
	val = 0;
	left = local64_read(&event->hw.period_left) - delta;
	if (delta == 0)
		left++;
	if (period) {
		if (left <= 0) {
			left += period;
			if (left <= 0)
				left = period;
			record = siar_valid(regs);
			event->hw.last_period = event->hw.sample_period;
		}
		if (left < 0x80000000LL)
			val = 0x80000000LL - left;
	}

	write_pmc(event->hw.idx, val);
	local64_set(&event->hw.prev_count, val);
	local64_set(&event->hw.period_left, left);
	perf_event_update_userpage(event);

	/*
	 * Finally record data if requested.
	 */
	if (record) {
		struct perf_sample_data data;

		perf_sample_data_init(&data, ~0ULL, event->hw.last_period);

		if (event->attr.sample_type & PERF_SAMPLE_ADDR)
			perf_get_data_addr(regs, &data.addr);

		if (perf_event_overflow(event, &data, regs))
			power_pmu_stop(event, 0);
	}
}
/*
 * Called from generic code to get the misc flags (i.e. processor mode)
 * for an event_id.
 */
unsigned long perf_misc_flags(struct pt_regs *regs)
{
	u32 flags = perf_get_misc_flags(regs);

	if (flags)
		return flags;
	return user_mode(regs) ? PERF_RECORD_MISC_USER :
		PERF_RECORD_MISC_KERNEL;
}

/*
 * Called from generic code to get the instruction pointer
 * for an event_id.
 */
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
	unsigned long use_siar = regs->result;

	if (use_siar && siar_valid(regs))
		return mfspr(SPRN_SIAR) + perf_ip_adjust(regs);
	else if (use_siar)
		return 0;		/* no valid instruction pointer */
	else
		return regs->nip;
}
static bool pmc_overflow_power7(unsigned long val)
{
	/*
	 * Events on POWER7 can roll back if a speculative event doesn't
	 * eventually complete. Unfortunately in some rare cases they will
	 * raise a performance monitor exception. We need to catch this to
	 * ensure we reset the PMC. In all cases the PMC will be 256 or less
	 * cycles from overflow.
	 *
	 * We only do this if the first pass fails to find any overflowing
	 * PMCs because a user might set a period of less than 256 and we
	 * don't want to mistakenly reset them.
	 */
	if ((0x80000000 - val) <= 256)
		return true;

	return false;
}
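/* A PMC has overflowed if its value, viewed as a signed 32-bit quantity, is negative. */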
static bool pmc_overflow(unsigned long val)
{
	if ((int)val < 0)
		return true;

	return false;
}
/*
 * Performance monitor interrupt stuff
 */
static void perf_event_interrupt(struct pt_regs *regs)
{
	int i, j;
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
	struct perf_event *event;
	unsigned long val[8];
	int found, active;
	int nmi;

	if (cpuhw->n_limited)
		freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5),
					mfspr(SPRN_PMC6));

	perf_read_regs(regs);

	nmi = perf_intr_is_nmi(regs);
	if (nmi)
		nmi_enter();
	else
		irq_enter();

	/* Read all the PMCs since we'll need them a bunch of times */
	for (i = 0; i < ppmu->n_counter; ++i)
		val[i] = read_pmc(i + 1);

	/* Try to find what caused the IRQ */
	found = 0;
	for (i = 0; i < ppmu->n_counter; ++i) {
		if (!pmc_overflow(val[i]))
			continue;
		if (is_limited_pmc(i + 1))
			continue; /* these won't generate IRQs */
		/*
		 * We've found one that's overflowed.  For active
		 * counters we need to log this.  For inactive
		 * counters, we need to reset it anyway
		 */
		found = 1;
		active = 0;
		for (j = 0; j < cpuhw->n_events; ++j) {
			event = cpuhw->event[j];
			if (event->hw.idx == (i + 1)) {
				active = 1;
				record_and_restart(event, val[i], regs);
				break;
			}
		}
		if (!active)
			/* reset non active counters that have overflowed */
			write_pmc(i + 1, 0);
	}
	if (!found && pvr_version_is(PVR_POWER7)) {
		/* check active counters for special buggy p7 overflow */
		for (i = 0; i < cpuhw->n_events; ++i) {
			event = cpuhw->event[i];
			if (!event->hw.idx || is_limited_pmc(event->hw.idx))
				continue;
			if (pmc_overflow_power7(val[event->hw.idx - 1])) {
				/* event has overflowed in a buggy way */
				found = 1;
				record_and_restart(event,
						   val[event->hw.idx - 1],
						   regs);
			}
		}
	}
	if ((!found) && printk_ratelimit())
		printk(KERN_WARNING "Can't find PMC that caused IRQ\n");

	/*
	 * Reset MMCR0 to its normal value.  This will set PMXE and
	 * clear FC (freeze counters) and PMAO (perf mon alert occurred)
	 * and thus allow interrupts to occur again.
	 * XXX might want to use MSR.PM to keep the events frozen until
	 * we get back out of this interrupt.
	 */
	write_mmcr0(cpuhw, cpuhw->mmcr[0]);

	if (nmi)
		nmi_exit();
	else
		irq_exit();
}
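/* Initialise the per-CPU state with all counters frozen (MMCR0_FC). */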
static void power_pmu_setup(int cpu)
{
	struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);

	if (!ppmu)
		return;
	memset(cpuhw, 0, sizeof(*cpuhw));
	cpuhw->mmcr[0] = MMCR0_FC;
}
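/* CPU hotplug callback: (re)initialise the per-CPU PMU state when a CPU comes up. */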
static int __cpuinit
power_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		power_pmu_setup(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

int __cpuinit register_power_pmu(struct power_pmu *pmu)
{
	if (ppmu)
		return -EBUSY;		/* something's already registered */

	ppmu = pmu;
	pr_info("%s performance monitor hardware support registered\n",
		pmu->name);

	power_pmu.attr_groups = ppmu->attr_groups;

#ifdef MSR_HV
	/*
	 * Use FCHV to ignore kernel events if MSR.HV is set.
	 */
	if (mfmsr() & MSR_HV)
		freeze_events_kernel = MMCR0_FCHV;
#endif /* CONFIG_PPC64 */

	perf_pmu_register(&power_pmu, "cpu", PERF_TYPE_RAW);
	perf_cpu_notifier(power_pmu_notifier);

	return 0;
}