perf_event.c
#undef DEBUG

/*
 * ARM performance counter support.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This code is based on the sparc64 perf event code, which is in turn based
 * on the x86 code. Callchain code is based on the ARM OProfile backtrace
 * code.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/uaccess.h>

#include <asm/irq_regs.h>
#include <asm/pmu.h>
#include <asm/stacktrace.h>

static int
armpmu_map_cache_event(const unsigned (*cache_map)
				      [PERF_COUNT_HW_CACHE_MAX]
				      [PERF_COUNT_HW_CACHE_OP_MAX]
				      [PERF_COUNT_HW_CACHE_RESULT_MAX],
		       u64 config)
{
	unsigned int cache_type, cache_op, cache_result, ret;

	cache_type = (config >>  0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >>  8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];

	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	return ret;
}
static int
armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
	int mapping;

	/* Reject out-of-range configs before indexing the map. */
	if (config >= PERF_COUNT_HW_MAX)
		return -EINVAL;

	mapping = (*event_map)[config];
	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}
static int
armpmu_map_raw_event(u32 raw_event_mask, u64 config)
{
	return (int)(config & raw_event_mask);
}

int
armpmu_map_event(struct perf_event *event,
		 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
		 const unsigned (*cache_map)
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX],
		 u32 raw_event_mask)
{
	u64 config = event->attr.config;

	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		return armpmu_map_hw_event(event_map, config);
	case PERF_TYPE_HW_CACHE:
		return armpmu_map_cache_event(cache_map, config);
	case PERF_TYPE_RAW:
		return armpmu_map_raw_event(raw_event_mask, config);
	}

	return -ENOENT;
}
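/*
 * Worked example (illustrative; the encoding follows the perf ABI that
 * armpmu_map_cache_event() above unpacks). A PERF_TYPE_HW_CACHE config
 * packs three selectors into its low 24 bits:
 *
 *	config = (PERF_COUNT_HW_CACHE_L1D         <<  0) |	- type
 *		 (PERF_COUNT_HW_CACHE_OP_READ     <<  8) |	- op
 *		 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);	- result
 *
 * Here config == 0x010000, which selects an L1D read miss and is looked
 * up as (*cache_map)[0][0][1] to find the CPU-specific event number.
 */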
int armpmu_event_set_period(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	/* The period may have been changed by PERF_EVENT_IOC_PERIOD */
	if (unlikely(period != hwc->last_period))
		left = period - (hwc->last_period - left);

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (left > (s64)armpmu->max_period)
		left = armpmu->max_period;

	local64_set(&hwc->prev_count, (u64)-left);

	armpmu->write_counter(event, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);

	return ret;
}
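/*
 * Worked example (illustrative, assuming a 32-bit counter, i.e.
 * max_period == 0xffffffff): the counters count upwards and interrupt on
 * overflow, so the period is programmed as a negated value. With
 * left == 0x1000 the write above becomes
 *
 *	write_counter(event, (u64)(-0x1000) & 0xffffffff)  ->  0xfffff000
 *
 * and the counter wraps (raising the overflow IRQ) after exactly 0x1000
 * events. prev_count is seeded with (u64)-left so the next
 * armpmu_event_update() computes a delta of exactly the events that
 * elapsed since this reprogramming.
 */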
u64 armpmu_event_update(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = armpmu->read_counter(event);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}
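/*
 * Worked example (illustrative, again assuming a 32-bit counter with
 * max_period == 0xffffffff): the subtraction is done in 64 bits and the
 * result is masked with max_period, so a counter that wrapped still
 * yields the right delta:
 *
 *	prev = 0xfffff000, new = 0x00000010
 *	delta = (0x00000010 - 0xfffff000) & 0xffffffff = 0x1010
 *
 * i.e. 0x1000 events up to the wrap plus 0x10 after it.
 */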
static void
armpmu_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/* Don't read disabled counters! */
	if (hwc->idx < 0)
		return;

	armpmu_event_update(event);
}

static void
armpmu_stop(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to update the counter, so ignore
	 * PERF_EF_UPDATE, see comments in armpmu_start().
	 */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		armpmu->disable(event);
		armpmu_event_update(event);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}
static void armpmu_start(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to reprogram the period, so ignore
	 * PERF_EF_RELOAD, see the comment below.
	 */
	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;
	/*
	 * Set the period again. Some counters can't be stopped, so when we
	 * were stopped we simply disabled the IRQ source and the counter
	 * may have been left counting. If we don't do this step then we may
	 * get an interrupt too soon or *way* too late if the overflow has
	 * happened since disabling.
	 */
	armpmu_event_set_period(event);
	armpmu->enable(event);
}
static void
armpmu_del(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	WARN_ON(idx < 0);

	armpmu_stop(event, PERF_EF_UPDATE);
	hw_events->events[idx] = NULL;
	clear_bit(idx, hw_events->used_mask);

	perf_event_update_userpage(event);
}
static int
armpmu_add(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	perf_pmu_disable(event->pmu);

	/* If we don't have a free counter for the event then finish early. */
	idx = armpmu->get_event_idx(hw_events, event);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	/*
	 * If there is an event in the counter we are going to use then make
	 * sure it is disabled.
	 */
	event->hw.idx = idx;
	armpmu->disable(event);
	hw_events->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		armpmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	perf_pmu_enable(event->pmu);
	return err;
}
static int
validate_event(struct pmu_hw_events *hw_events,
	       struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu *leader_pmu = event->group_leader->pmu;

	if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
		return 1;

	return armpmu->get_event_idx(hw_events, event) >= 0;
}

static int
validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct pmu_hw_events fake_pmu;
	DECLARE_BITMAP(fake_used_mask, ARMPMU_MAX_HWEVENTS);

	/*
	 * Initialise the fake PMU. We only need to populate the
	 * used_mask for the purposes of validation.
	 */
	memset(fake_used_mask, 0, sizeof(fake_used_mask));
	fake_pmu.used_mask = fake_used_mask;

	if (!validate_event(&fake_pmu, leader))
		return -EINVAL;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(&fake_pmu, sibling))
			return -EINVAL;
	}

	if (!validate_event(&fake_pmu, event))
		return -EINVAL;

	return 0;
}
static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
{
	struct arm_pmu *armpmu = (struct arm_pmu *) dev;
	struct platform_device *plat_device = armpmu->plat_device;
	struct arm_pmu_platdata *plat = dev_get_platdata(&plat_device->dev);

	if (plat && plat->handle_irq)
		return plat->handle_irq(irq, dev, armpmu->handle_irq);
	else
		return armpmu->handle_irq(irq, dev);
}

static void
armpmu_release_hardware(struct arm_pmu *armpmu)
{
	armpmu->free_irq(armpmu);
	pm_runtime_put_sync(&armpmu->plat_device->dev);
}

static int
armpmu_reserve_hardware(struct arm_pmu *armpmu)
{
	int err;
	struct platform_device *pmu_device = armpmu->plat_device;

	if (!pmu_device)
		return -ENODEV;

	pm_runtime_get_sync(&pmu_device->dev);
	err = armpmu->request_irq(armpmu, armpmu_dispatch_irq);
	if (err) {
		armpmu_release_hardware(armpmu);
		return err;
	}

	return 0;
}

static void
hw_perf_event_destroy(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	atomic_t *active_events = &armpmu->active_events;
	struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;

	if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
		armpmu_release_hardware(armpmu);
		mutex_unlock(pmu_reserve_mutex);
	}
}
static int
event_requires_mode_exclusion(struct perf_event_attr *attr)
{
	return attr->exclude_idle || attr->exclude_user ||
	       attr->exclude_kernel || attr->exclude_hv;
}

static int
__hw_perf_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int mapping, err;

	mapping = armpmu->map_event(event);

	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapping;
	}

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet. For SMP systems, each core has its own PMU so we can't do any
	 * clever allocation or constraints checking at this point.
	 */
	hwc->idx = -1;
	hwc->config_base = 0;
	hwc->config = 0;
	hwc->event_base = 0;

	/*
	 * Check whether we need to exclude the counter from certain modes.
	 */
	if ((!armpmu->set_event_filter ||
	     armpmu->set_event_filter(hwc, &event->attr)) &&
	     event_requires_mode_exclusion(&event->attr)) {
		pr_debug("ARM performance counters do not support mode exclusion\n");
		return -EOPNOTSUPP;
	}

	/*
	 * Store the event encoding into the config_base field.
	 */
	hwc->config_base |= (unsigned long)mapping;

	if (!hwc->sample_period) {
		/*
		 * For non-sampling runs, limit the sample_period to half
		 * of the counter width. That way, the new counter value
		 * is far less likely to overtake the previous one unless
		 * you have some serious IRQ latency issues.
		 */
		hwc->sample_period = armpmu->max_period >> 1;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	err = 0;
	if (event->group_leader != event) {
		err = validate_group(event);
		if (err)
			return -EINVAL;
	}

	return err;
}
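/*
 * Worked example (illustrative, assuming a 32-bit counter): for a plain
 * counting (non-sampling) event, max_period == 0xffffffff so the default
 * sample_period above becomes 0x7fffffff. The counter is then reprogrammed
 * roughly every 2^31 events, which keeps the new hardware value far less
 * likely to overtake prev_count even with sizeable IRQ latency.
 */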
static int armpmu_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	int err = 0;
	atomic_t *active_events = &armpmu->active_events;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	if (armpmu->map_event(event) == -ENOENT)
		return -ENOENT;

	event->destroy = hw_perf_event_destroy;

	if (!atomic_inc_not_zero(active_events)) {
		mutex_lock(&armpmu->reserve_mutex);
		if (atomic_read(active_events) == 0)
			err = armpmu_reserve_hardware(armpmu);

		if (!err)
			atomic_inc(active_events);
		mutex_unlock(&armpmu->reserve_mutex);
	}

	if (err)
		return err;

	err = __hw_perf_event_init(event);
	if (err)
		hw_perf_event_destroy(event);

	return err;
}
static void armpmu_enable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

	if (enabled)
		armpmu->start(armpmu);
}

static void armpmu_disable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	armpmu->stop(armpmu);
}

#ifdef CONFIG_PM_RUNTIME
static int armpmu_runtime_resume(struct device *dev)
{
	struct arm_pmu_platdata *plat = dev_get_platdata(dev);

	if (plat && plat->runtime_resume)
		return plat->runtime_resume(dev);

	return 0;
}

static int armpmu_runtime_suspend(struct device *dev)
{
	struct arm_pmu_platdata *plat = dev_get_platdata(dev);

	if (plat && plat->runtime_suspend)
		return plat->runtime_suspend(dev);

	return 0;
}
#endif

const struct dev_pm_ops armpmu_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(armpmu_runtime_suspend, armpmu_runtime_resume, NULL)
};
static void __init armpmu_init(struct arm_pmu *armpmu)
{
	atomic_set(&armpmu->active_events, 0);
	mutex_init(&armpmu->reserve_mutex);

	armpmu->pmu = (struct pmu) {
		.pmu_enable	= armpmu_enable,
		.pmu_disable	= armpmu_disable,
		.event_init	= armpmu_event_init,
		.add		= armpmu_add,
		.del		= armpmu_del,
		.start		= armpmu_start,
		.stop		= armpmu_stop,
		.read		= armpmu_read,
	};
}

int armpmu_register(struct arm_pmu *armpmu, int type)
{
	armpmu_init(armpmu);
	pm_runtime_enable(&armpmu->plat_device->dev);
	pr_info("enabled with %s PMU driver, %d counters available\n",
			armpmu->name, armpmu->num_events);
	return perf_pmu_register(&armpmu->pmu, armpmu->name, type);
}
/*
 * Callchain handling code.
 */

/*
 * The registers we're interested in are at the end of the variable
 * length saved register structure. The fp points at the end of this
 * structure so the address of this struct is:
 * (struct frame_tail *)(xxx->fp)-1
 *
 * This code has been adapted from the ARM OProfile support.
 */
struct frame_tail {
	struct frame_tail __user *fp;
	unsigned long sp;
	unsigned long lr;
} __attribute__((packed));
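/*
 * Illustrative layout (assuming the frame arrangement described above,
 * with the frame pointer pointing just past the saved registers):
 *
 *	higher addresses
 *	  ...
 *	  <fp points here>   tail = (struct frame_tail __user *)fp - 1
 *	  saved lr   \
 *	  saved sp    } struct frame_tail
 *	  saved fp   /
 *	lower addresses
 *
 * Each step of the user-space walk copies one such tail, records lr as a
 * return address and follows the saved fp to the caller's frame.
 */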
/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 */
static struct frame_tail __user *
user_backtrace(struct frame_tail __user *tail,
	       struct perf_callchain_entry *entry)
{
	struct frame_tail buftail;

	/* Also check accessibility of one struct frame_tail beyond */
	if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
		return NULL;
	if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail)))
		return NULL;

	perf_callchain_store(entry, buftail.lr);

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail + 1 >= buftail.fp)
		return NULL;

	return buftail.fp - 1;
}
void
perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
	struct frame_tail __user *tail;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		/* We don't support guest OS callchains yet */
		return;
	}

	tail = (struct frame_tail __user *)regs->ARM_fp - 1;

	while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
	       tail && !((unsigned long)tail & 0x3))
		tail = user_backtrace(tail, entry);
}
/*
 * Gets called by walk_stackframe() for every stackframe. This will be called
 * whilst unwinding the stackframe and is like a subroutine return so we use
 * the PC.
 */
static int
callchain_trace(struct stackframe *fr,
		void *data)
{
	struct perf_callchain_entry *entry = data;
	perf_callchain_store(entry, fr->pc);
	return 0;
}

void
perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
	struct stackframe fr;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		/* We don't support guest OS callchains yet */
		return;
	}

	fr.fp = regs->ARM_fp;
	fr.sp = regs->ARM_sp;
	fr.lr = regs->ARM_lr;
	fr.pc = regs->ARM_pc;
	walk_stackframe(&fr, callchain_trace, entry);
}
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
	if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
		return perf_guest_cbs->get_guest_ip();

	return instruction_pointer(regs);
}

unsigned long perf_misc_flags(struct pt_regs *regs)
{
	int misc = 0;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		if (perf_guest_cbs->is_user_mode())
			misc |= PERF_RECORD_MISC_GUEST_USER;
		else
			misc |= PERF_RECORD_MISC_GUEST_KERNEL;
	} else {
		if (user_mode(regs))
			misc |= PERF_RECORD_MISC_USER;
		else
			misc |= PERF_RECORD_MISC_KERNEL;
	}

	return misc;
}