core-fsl-emb.c

/*
 * Performance event support - Freescale Embedded Performance Monitor
 *
 * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
 * Copyright 2010 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/reg_fsl_emb.h>
#include <asm/pmc.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>
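
/*
 * Per-CPU bookkeeping: how many events are currently scheduled on the
 * hardware counters, whether the PMU is currently frozen, whether
 * ppc_enable_pmcs() has been called on this CPU, and which perf_event
 * owns each counter.
 */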
struct cpu_hw_events {
	int n_events;
	int disabled;
	u8 pmcs_enabled;
	struct perf_event *event[MAX_HWEVENTS];
};
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

static struct fsl_emb_pmu *ppmu;

/* Number of perf_events counting hardware events */
static atomic_t num_events;
/* Used to avoid races in calling reserve/release_pmc_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);

/*
 * If interrupts were soft-disabled when a PMU interrupt occurs, treat
 * it as an NMI.
 */
static inline int perf_intr_is_nmi(struct pt_regs *regs)
{
#ifdef __powerpc64__
	return !regs->softe;
#else
	return 0;
#endif
}

static void perf_event_interrupt(struct pt_regs *regs);

/*
 * Read one performance monitor counter (PMC).
 */
static unsigned long read_pmc(int idx)
{
	unsigned long val;

	switch (idx) {
	case 0:
		val = mfpmr(PMRN_PMC0);
		break;
	case 1:
		val = mfpmr(PMRN_PMC1);
		break;
	case 2:
		val = mfpmr(PMRN_PMC2);
		break;
	case 3:
		val = mfpmr(PMRN_PMC3);
		break;
	default:
		printk(KERN_ERR "oops trying to read PMC%d\n", idx);
		val = 0;
	}
	return val;
}

/*
 * Write one PMC.
 */
static void write_pmc(int idx, unsigned long val)
{
	switch (idx) {
	case 0:
		mtpmr(PMRN_PMC0, val);
		break;
	case 1:
		mtpmr(PMRN_PMC1, val);
		break;
	case 2:
		mtpmr(PMRN_PMC2, val);
		break;
	case 3:
		mtpmr(PMRN_PMC3, val);
		break;
	default:
		printk(KERN_ERR "oops trying to write PMC%d\n", idx);
	}

	isync();
}

/*
 * Write one local control A register
 */
static void write_pmlca(int idx, unsigned long val)
{
	switch (idx) {
	case 0:
		mtpmr(PMRN_PMLCA0, val);
		break;
	case 1:
		mtpmr(PMRN_PMLCA1, val);
		break;
	case 2:
		mtpmr(PMRN_PMLCA2, val);
		break;
	case 3:
		mtpmr(PMRN_PMLCA3, val);
		break;
	default:
		printk(KERN_ERR "oops trying to write PMLCA%d\n", idx);
	}

	isync();
}

/*
 * Write one local control B register
 */
static void write_pmlcb(int idx, unsigned long val)
{
	switch (idx) {
	case 0:
		mtpmr(PMRN_PMLCB0, val);
		break;
	case 1:
		mtpmr(PMRN_PMLCB1, val);
		break;
	case 2:
		mtpmr(PMRN_PMLCB2, val);
		break;
	case 3:
		mtpmr(PMRN_PMLCB3, val);
		break;
	default:
		printk(KERN_ERR "oops trying to write PMLCB%d\n", idx);
	}

	isync();
}
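
/*
 * Fold the current hardware count for one event into its 64-bit
 * software count.  The PMCs are only 32 bits wide, so the delta is
 * computed modulo 2^32: e.g. if prev_count was 0xfffffff0 and the PMC
 * now reads 0x00000010, the delta is (0x10 - 0xfffffff0) & 0xffffffff
 * = 0x20 events.
 */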
static void fsl_emb_pmu_read(struct perf_event *event)
{
	s64 val, delta, prev;

	if (event->hw.state & PERF_HES_STOPPED)
		return;

	/*
	 * Performance monitor interrupts come even when interrupts
	 * are soft-disabled, as long as interrupts are hard-enabled.
	 * Therefore we treat them like NMIs.
	 */
	do {
		prev = local64_read(&event->hw.prev_count);
		barrier();
		val = read_pmc(event->hw.idx);
	} while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);

	/* The counters are only 32 bits wide */
	delta = (val - prev) & 0xfffffffful;
	local64_add(delta, &event->count);
	local64_sub(delta, &event->hw.period_left);
}

/*
 * Disable all events to prevent PMU interrupts and to allow
 * events to be added or removed.
 */
static void fsl_emb_pmu_disable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw;
	unsigned long flags;

	local_irq_save(flags);
	cpuhw = &__get_cpu_var(cpu_hw_events);

	if (!cpuhw->disabled) {
		cpuhw->disabled = 1;

		/*
		 * Check if we ever enabled the PMU on this cpu.
		 */
		if (!cpuhw->pmcs_enabled) {
			ppc_enable_pmcs();
			cpuhw->pmcs_enabled = 1;
		}

		if (atomic_read(&num_events)) {
			/*
			 * Set the 'freeze all counters' bit, and disable
			 * interrupts.  The barrier is to make sure the
			 * mtpmr has been executed and the PMU has frozen
			 * the events before we return.
			 */

			mtpmr(PMRN_PMGC0, PMGC0_FAC);
			isync();
		}
	}
	local_irq_restore(flags);
}

/*
 * Re-enable all events if disable == 0.
 * If we were previously disabled and events were added, then
 * put the new config on the PMU.
 */
static void fsl_emb_pmu_enable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw;
	unsigned long flags;

	local_irq_save(flags);
	cpuhw = &__get_cpu_var(cpu_hw_events);
	if (!cpuhw->disabled)
		goto out;

	cpuhw->disabled = 0;
	ppc_set_pmu_inuse(cpuhw->n_events != 0);

	if (cpuhw->n_events > 0) {
		mtpmr(PMRN_PMGC0, PMGC0_PMIE | PMGC0_FCECE);
		isync();
	}

 out:
	local_irq_restore(flags);
}
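
/*
 * Count the hardware events in a group (the leader plus any siblings
 * that are not pure software events and are not switched off) and
 * collect them into ctrs[].  Returns the number collected, or -1 if
 * the group needs more than max_count counters.
 */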
static int collect_events(struct perf_event *group, int max_count,
			  struct perf_event *ctrs[])
{
	int n = 0;
	struct perf_event *event;

	if (!is_software_event(group)) {
		if (n >= max_count)
			return -1;
		ctrs[n] = group;
		n++;
	}
	list_for_each_entry(event, &group->sibling_list, group_entry) {
		if (!is_software_event(event) &&
		    event->state != PERF_EVENT_STATE_OFF) {
			if (n >= max_count)
				return -1;
			ctrs[n] = event;
			n++;
		}
	}
	return n;
}

/* context locked on entry */
static int fsl_emb_pmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuhw;
	int ret = -EAGAIN;
	int num_counters = ppmu->n_counter;
	u64 val;
	int i;

	perf_pmu_disable(event->pmu);
	cpuhw = &get_cpu_var(cpu_hw_events);

	if (event->hw.config & FSL_EMB_EVENT_RESTRICTED)
		num_counters = ppmu->n_restricted;

	/*
	 * Allocate counters from top-down, so that restricted-capable
	 * counters are kept free as long as possible.
	 */
	for (i = num_counters - 1; i >= 0; i--) {
		if (cpuhw->event[i])
			continue;

		break;
	}

	if (i < 0)
		goto out;

	event->hw.idx = i;
	cpuhw->event[i] = event;
	++cpuhw->n_events;
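
	/*
	 * The PMCs count up and flag an overflow once bit 31 is set
	 * (perf_event_interrupt treats a negative 32-bit value as an
	 * overflow), so for a sampling event we preload the counter
	 * with 0x80000000 - period_left: it then overflows after
	 * exactly period_left more events.
	 */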
	val = 0;
	if (event->hw.sample_period) {
		s64 left = local64_read(&event->hw.period_left);
		if (left < 0x80000000L)
			val = 0x80000000L - left;
	}
	local64_set(&event->hw.prev_count, val);

	if (!(flags & PERF_EF_START)) {
		event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
		val = 0;
	}

	write_pmc(i, val);
	perf_event_update_userpage(event);

	write_pmlcb(i, event->hw.config >> 32);
	write_pmlca(i, event->hw.config_base);

	ret = 0;
 out:
	put_cpu_var(cpu_hw_events);
	perf_pmu_enable(event->pmu);
	return ret;
}

/* context locked on entry */
static void fsl_emb_pmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuhw;
	int i = event->hw.idx;

	perf_pmu_disable(event->pmu);
	if (i < 0)
		goto out;

	fsl_emb_pmu_read(event);

	cpuhw = &get_cpu_var(cpu_hw_events);

	WARN_ON(event != cpuhw->event[event->hw.idx]);

	write_pmlca(i, 0);
	write_pmlcb(i, 0);
	write_pmc(i, 0);

	cpuhw->event[i] = NULL;
	event->hw.idx = -1;

	/*
	 * TODO: if at least one restricted event exists, and we
	 * just freed up a non-restricted-capable counter, and
	 * there is a restricted-capable counter occupied by
	 * a non-restricted event, migrate that event to the
	 * vacated counter.
	 */

	cpuhw->n_events--;

 out:
	perf_pmu_enable(event->pmu);
	put_cpu_var(cpu_hw_events);
}
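
/*
 * Start (or restart) a stopped sampling event: clear the STOPPED
 * state and reprogram its PMC from the remaining period.
 */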
static void fsl_emb_pmu_start(struct perf_event *event, int ef_flags)
{
	unsigned long flags;
	s64 left;

	if (event->hw.idx < 0 || !event->hw.sample_period)
		return;

	if (!(event->hw.state & PERF_HES_STOPPED))
		return;

	if (ef_flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	event->hw.state = 0;
	left = local64_read(&event->hw.period_left);
	write_pmc(event->hw.idx, left);

	perf_event_update_userpage(event);
	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
}

static void fsl_emb_pmu_stop(struct perf_event *event, int ef_flags)
{
	unsigned long flags;

	if (event->hw.idx < 0 || !event->hw.sample_period)
		return;

	if (event->hw.state & PERF_HES_STOPPED)
		return;

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	fsl_emb_pmu_read(event);
	event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	write_pmc(event->hw.idx, 0);

	perf_event_update_userpage(event);
	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
}

/*
 * Release the PMU if this is the last perf_event.
 */
static void hw_perf_event_destroy(struct perf_event *event)
{
	if (!atomic_add_unless(&num_events, -1, 1)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_dec_return(&num_events) == 0)
			release_pmc_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

/*
 * Translate a generic cache event_id config to a raw event_id code.
 */
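/*
 * The generic config is packed by the perf core as
 * (cache id) | (op id << 8) | (result id << 16).  For example,
 * L1-dcache read misses are PERF_COUNT_HW_CACHE_L1D (0) with op
 * READ (0) and result MISS (1), i.e. config == 0x10000.
 */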
static int hw_perf_cache_event(u64 config, u64 *eventp)
{
	unsigned long type, op, result;
	int ev;

	if (!ppmu->cache_events)
		return -EINVAL;

	/* unpack config */
	type = config & 0xff;
	op = (config >> 8) & 0xff;
	result = (config >> 16) & 0xff;

	if (type >= PERF_COUNT_HW_CACHE_MAX ||
	    op >= PERF_COUNT_HW_CACHE_OP_MAX ||
	    result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ev = (*ppmu->cache_events)[type][op][result];
	if (ev == 0)
		return -EOPNOTSUPP;
	if (ev == -1)
		return -EINVAL;
	*eventp = ev;
	return 0;
}
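
/*
 * Set up a new event: map the generic, cache or raw config to a
 * machine-specific event code via ppmu->xlate_event(), check that the
 * event (and its group) fits within the available and restricted
 * counters, apply the privilege-level exclusions, and reserve the PMC
 * hardware if this is the first active event.
 */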
static int fsl_emb_pmu_event_init(struct perf_event *event)
{
	u64 ev;
	struct perf_event *events[MAX_HWEVENTS];
	int n;
	int err;
	int num_restricted;
	int i;

	if (ppmu->n_counter > MAX_HWEVENTS) {
		WARN(1, "No. of perf counters (%d) is higher than max array size(%d)\n",
			ppmu->n_counter, MAX_HWEVENTS);
		ppmu->n_counter = MAX_HWEVENTS;
	}

	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		ev = event->attr.config;
		if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
			return -EOPNOTSUPP;
		ev = ppmu->generic_events[ev];
		break;

	case PERF_TYPE_HW_CACHE:
		err = hw_perf_cache_event(event->attr.config, &ev);
		if (err)
			return err;
		break;

	case PERF_TYPE_RAW:
		ev = event->attr.config;
		break;

	default:
		return -ENOENT;
	}

	event->hw.config = ppmu->xlate_event(ev);
	if (!(event->hw.config & FSL_EMB_EVENT_VALID))
		return -EINVAL;

	/*
	 * If this is in a group, check if it can go on with all the
	 * other hardware events in the group.  We assume the event
	 * hasn't been linked into its leader's sibling list at this point.
	 */
	n = 0;
	if (event->group_leader != event) {
		n = collect_events(event->group_leader,
				   ppmu->n_counter - 1, events);
		if (n < 0)
			return -EINVAL;
	}

	if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) {
		num_restricted = 0;
		for (i = 0; i < n; i++) {
			if (events[i]->hw.config & FSL_EMB_EVENT_RESTRICTED)
				num_restricted++;
		}

		if (num_restricted >= ppmu->n_restricted)
			return -EINVAL;
	}

	event->hw.idx = -1;

	event->hw.config_base = PMLCA_CE | PMLCA_FCM1 |
				(u32)((ev << 16) & PMLCA_EVENT_MASK);

	if (event->attr.exclude_user)
		event->hw.config_base |= PMLCA_FCU;
	if (event->attr.exclude_kernel)
		event->hw.config_base |= PMLCA_FCS;
	if (event->attr.exclude_idle)
		return -ENOTSUPP;

	event->hw.last_period = event->hw.sample_period;
	local64_set(&event->hw.period_left, event->hw.last_period);

	/*
	 * See if we need to reserve the PMU.
	 * If no events are currently in use, then we have to take a
	 * mutex to ensure that we don't race with another task doing
	 * reserve_pmc_hardware or release_pmc_hardware.
	 */
	err = 0;
	if (!atomic_inc_not_zero(&num_events)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&num_events) == 0 &&
		    reserve_pmc_hardware(perf_event_interrupt))
			err = -EBUSY;
		else
			atomic_inc(&num_events);
		mutex_unlock(&pmc_reserve_mutex);

		mtpmr(PMRN_PMGC0, PMGC0_FAC);
		isync();
	}

	event->destroy = hw_perf_event_destroy;

	return err;
}
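
/*
 * Callbacks through which the core perf_event code drives this PMU:
 * pmu_{enable,disable} unfreeze/freeze the whole unit, add/del bind an
 * event to a counter, start/stop control individual counters, and
 * read folds the hardware count into the event.
 */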
static struct pmu fsl_emb_pmu = {
	.pmu_enable	= fsl_emb_pmu_enable,
	.pmu_disable	= fsl_emb_pmu_disable,
	.event_init	= fsl_emb_pmu_event_init,
	.add		= fsl_emb_pmu_add,
	.del		= fsl_emb_pmu_del,
	.start		= fsl_emb_pmu_start,
	.stop		= fsl_emb_pmu_stop,
	.read		= fsl_emb_pmu_read,
};

/*
 * A counter has overflowed; update its count and record
 * things if requested.  Note that interrupts are hard-disabled
 * here so there is no possibility of being interrupted.
 */
static void record_and_restart(struct perf_event *event, unsigned long val,
			       struct pt_regs *regs)
{
	u64 period = event->hw.sample_period;
	s64 prev, delta, left;
	int record = 0;

	if (event->hw.state & PERF_HES_STOPPED) {
		write_pmc(event->hw.idx, 0);
		return;
	}

	/* we don't have to worry about interrupts here */
	prev = local64_read(&event->hw.prev_count);
	delta = (val - prev) & 0xfffffffful;
	local64_add(delta, &event->count);

	/*
	 * See if the total period for this event has expired,
	 * and update for the next period.
	 */
	val = 0;
	left = local64_read(&event->hw.period_left) - delta;
	if (period) {
		if (left <= 0) {
			left += period;
			if (left <= 0)
				left = period;
			record = 1;
			event->hw.last_period = event->hw.sample_period;
		}
		if (left < 0x80000000LL)
			val = 0x80000000LL - left;
	}
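
	/*
	 * Reload the counter: 0x80000000 - left makes the 32-bit PMC
	 * go negative (i.e. overflow) again after 'left' more events.
	 * If left is still >= 2^31, or this is not a sampling event,
	 * the PMC simply restarts from zero.
	 */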
	write_pmc(event->hw.idx, val);
	local64_set(&event->hw.prev_count, val);
	local64_set(&event->hw.period_left, left);
	perf_event_update_userpage(event);

	/*
	 * Finally record data if requested.
	 */
	if (record) {
		struct perf_sample_data data;

		perf_sample_data_init(&data, 0, event->hw.last_period);

		if (perf_event_overflow(event, &data, regs))
			fsl_emb_pmu_stop(event, 0);
	}
}
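
/*
 * PMU interrupt handler: scan every counter, and for each one whose
 * 32-bit value has gone negative (i.e. has overflowed), hand it to
 * record_and_restart().  Runs as an NMI if interrupts were
 * soft-disabled when it fired.
 */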
static void perf_event_interrupt(struct pt_regs *regs)
{
	int i;
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
	struct perf_event *event;
	unsigned long val;
	int found = 0;
	int nmi;

	nmi = perf_intr_is_nmi(regs);
	if (nmi)
		nmi_enter();
	else
		irq_enter();

	for (i = 0; i < ppmu->n_counter; ++i) {
		event = cpuhw->event[i];

		val = read_pmc(i);
		if ((int)val < 0) {
			if (event) {
				/* event has overflowed */
				found = 1;
				record_and_restart(event, val, regs);
			} else {
				/*
				 * Disabled counter is negative,
				 * reset it just in case.
				 */
				write_pmc(i, 0);
			}
		}
	}

	/* PMM will keep counters frozen until we return from the interrupt. */
	mtmsr(mfmsr() | MSR_PMM);
	mtpmr(PMRN_PMGC0, PMGC0_PMIE | PMGC0_FCECE);
	isync();

	if (nmi)
		nmi_exit();
	else
		irq_exit();
}
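
/*
 * Reset the per-CPU PMU bookkeeping for @cpu.
 */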
void hw_perf_event_setup(int cpu)
{
	struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);

	memset(cpuhw, 0, sizeof(*cpuhw));
}
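
/*
 * Register the family-specific PMU description with the core.  Only
 * one fsl_emb_pmu can be registered at a time.
 *
 * Illustrative (hypothetical) caller, sketching how a per-core PMU
 * driver built on top of this file might hook in:
 *
 *	static struct fsl_emb_pmu example_pmu = {
 *		.name		= "example embedded PMU",
 *		.n_counter	= 4,
 *		.n_restricted	= 2,
 *		...
 *	};
 *
 *	static int __init init_example_pmu(void)
 *	{
 *		return register_fsl_emb_pmu(&example_pmu);
 *	}
 */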
int register_fsl_emb_pmu(struct fsl_emb_pmu *pmu)
{
	if (ppmu)
		return -EBUSY;		/* something's already registered */

	ppmu = pmu;
	pr_info("%s performance monitor hardware support registered\n",
		pmu->name);

	perf_pmu_register(&fsl_emb_pmu, "cpu", PERF_TYPE_RAW);

	return 0;
}