perf_event.c

/*
 * PMU support
 *
 * Copyright (C) 2012 ARM Limited
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This code is based heavily on the ARMv7 perf event code.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/bitmap.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>

#include <asm/cputype.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/pmu.h>
#include <asm/stacktrace.h>

/*
 * ARMv8 supports a maximum of 32 events.
 * The cycle counter is included in this total.
 */
#define ARMPMU_MAX_HWEVENTS 32

static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events);
static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask);
static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);

#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))

/* Set at runtime when we know what CPU type we are. */
static struct arm_pmu *cpu_pmu;

int
armpmu_get_max_events(void)
{
    int max_events = 0;
    if (cpu_pmu != NULL)
        max_events = cpu_pmu->num_events;
    return max_events;
}
EXPORT_SYMBOL_GPL(armpmu_get_max_events);

int perf_num_counters(void)
{
    return armpmu_get_max_events();
}
EXPORT_SYMBOL_GPL(perf_num_counters);

#define HW_OP_UNSUPPORTED 0xFFFF

#define C(_x) \
    PERF_COUNT_HW_CACHE_##_x

#define CACHE_OP_UNSUPPORTED 0xFFFF

static int
armpmu_map_cache_event(const unsigned (*cache_map)
                       [PERF_COUNT_HW_CACHE_MAX]
                       [PERF_COUNT_HW_CACHE_OP_MAX]
                       [PERF_COUNT_HW_CACHE_RESULT_MAX],
                       u64 config)
{
    unsigned int cache_type, cache_op, cache_result, ret;
    cache_type = (config >> 0) & 0xff;
    if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
        return -EINVAL;
    cache_op = (config >> 8) & 0xff;
    if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
        return -EINVAL;
    cache_result = (config >> 16) & 0xff;
    if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
        return -EINVAL;
    ret = (int)(*cache_map)[cache_type][cache_op][cache_result];
    if (ret == CACHE_OP_UNSUPPORTED)
        return -ENOENT;
    return ret;
}

static int
armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
    int mapping = (*event_map)[config];
    return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}

static int
armpmu_map_raw_event(u32 raw_event_mask, u64 config)
{
    return (int)(config & raw_event_mask);
}

static int map_cpu_event(struct perf_event *event,
                         const unsigned (*event_map)[PERF_COUNT_HW_MAX],
                         const unsigned (*cache_map)
                         [PERF_COUNT_HW_CACHE_MAX]
                         [PERF_COUNT_HW_CACHE_OP_MAX]
                         [PERF_COUNT_HW_CACHE_RESULT_MAX],
                         u32 raw_event_mask)
{
    u64 config = event->attr.config;
    switch (event->attr.type) {
    case PERF_TYPE_HARDWARE:
        return armpmu_map_event(event_map, config);
    case PERF_TYPE_HW_CACHE:
        return armpmu_map_cache_event(cache_map, config);
    case PERF_TYPE_RAW:
        return armpmu_map_raw_event(raw_event_mask, config);
    }
    return -ENOENT;
}

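/*
 * Program the counter for the next sample period. The counter is loaded
 * with the two's complement of the remaining period (-left), so that it
 * overflows, and flags an interrupt, after "left" further events. Only the
 * low 32 bits are written, matching the 32-bit ARMv8 event counters.
 */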
int
armpmu_event_set_period(struct perf_event *event,
                        struct hw_perf_event *hwc,
                        int idx)
{
    struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
    s64 left = local64_read(&hwc->period_left);
    s64 period = hwc->sample_period;
    int ret = 0;
    if (unlikely(left <= -period)) {
        left = period;
        local64_set(&hwc->period_left, left);
        hwc->last_period = period;
        ret = 1;
    }
    if (unlikely(left <= 0)) {
        left += period;
        local64_set(&hwc->period_left, left);
        hwc->last_period = period;
        ret = 1;
    }
    if (left > (s64)armpmu->max_period)
        left = armpmu->max_period;
    local64_set(&hwc->prev_count, (u64)-left);
    armpmu->write_counter(idx, (u64)(-left) & 0xffffffff);
    perf_event_update_userpage(event);
    return ret;
}

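/*
 * Read the hardware counter and fold the delta since the last read into
 * the perf event count. The cmpxchg retry loop guards against a concurrent
 * update of prev_count from interrupt context; the delta is masked to the
 * counter width (max_period) so that wrap-around is handled correctly.
 */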
u64
armpmu_event_update(struct perf_event *event,
                    struct hw_perf_event *hwc,
                    int idx)
{
    struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
    u64 delta, prev_raw_count, new_raw_count;
again:
    prev_raw_count = local64_read(&hwc->prev_count);
    new_raw_count = armpmu->read_counter(idx);
    if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                        new_raw_count) != prev_raw_count)
        goto again;
    delta = (new_raw_count - prev_raw_count) & armpmu->max_period;
    local64_add(delta, &event->count);
    local64_sub(delta, &hwc->period_left);
    return new_raw_count;
}

static void
armpmu_read(struct perf_event *event)
{
    struct hw_perf_event *hwc = &event->hw;
    /* Don't read disabled counters! */
    if (hwc->idx < 0)
        return;
    armpmu_event_update(event, hwc, hwc->idx);
}

static void
armpmu_stop(struct perf_event *event, int flags)
{
    struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
    struct hw_perf_event *hwc = &event->hw;
    /*
     * ARM pmu always has to update the counter, so ignore
     * PERF_EF_UPDATE, see comments in armpmu_start().
     */
    if (!(hwc->state & PERF_HES_STOPPED)) {
        armpmu->disable(hwc, hwc->idx);
        barrier(); /* why? */
        armpmu_event_update(event, hwc, hwc->idx);
        hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
    }
}

static void
armpmu_start(struct perf_event *event, int flags)
{
    struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
    struct hw_perf_event *hwc = &event->hw;
    /*
     * ARM pmu always has to reprogram the period, so ignore
     * PERF_EF_RELOAD, see the comment below.
     */
    if (flags & PERF_EF_RELOAD)
        WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
    hwc->state = 0;
    /*
     * Set the period again. Some counters can't be stopped, so when we
     * were stopped we simply disabled the IRQ source and the counter
     * may have been left counting. If we don't do this step then we may
     * get an interrupt too soon or *way* too late if the overflow has
     * happened since disabling.
     */
    armpmu_event_set_period(event, hwc, hwc->idx);
    armpmu->enable(hwc, hwc->idx);
}

static void
armpmu_del(struct perf_event *event, int flags)
{
    struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
    struct pmu_hw_events *hw_events = armpmu->get_hw_events();
    struct hw_perf_event *hwc = &event->hw;
    int idx = hwc->idx;
    WARN_ON(idx < 0);
    armpmu_stop(event, PERF_EF_UPDATE);
    hw_events->events[idx] = NULL;
    clear_bit(idx, hw_events->used_mask);
    perf_event_update_userpage(event);
}

static int
armpmu_add(struct perf_event *event, int flags)
{
    struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
    struct pmu_hw_events *hw_events = armpmu->get_hw_events();
    struct hw_perf_event *hwc = &event->hw;
    int idx;
    int err = 0;
    perf_pmu_disable(event->pmu);
    /* If we don't have a space for the counter then finish early. */
    idx = armpmu->get_event_idx(hw_events, hwc);
    if (idx < 0) {
        err = idx;
        goto out;
    }
    /*
     * If there is an event in the counter we are going to use then make
     * sure it is disabled.
     */
    event->hw.idx = idx;
    armpmu->disable(hwc, idx);
    hw_events->events[idx] = event;
    hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
    if (flags & PERF_EF_START)
        armpmu_start(event, PERF_EF_RELOAD);
    /* Propagate our changes to the userspace mapping. */
    perf_event_update_userpage(event);
out:
    perf_pmu_enable(event->pmu);
    return err;
}

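/*
 * Group validation: build a fake PMU (only the used_mask matters) and check
 * that the group leader, all of its siblings and the new event could be
 * placed onto hardware counters at the same time.
 */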
static int
validate_event(struct pmu_hw_events *hw_events,
               struct perf_event *event)
{
    struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
    struct hw_perf_event fake_event = event->hw;
    struct pmu *leader_pmu = event->group_leader->pmu;
    if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
        return 1;
    return armpmu->get_event_idx(hw_events, &fake_event) >= 0;
}

static int
validate_group(struct perf_event *event)
{
    struct perf_event *sibling, *leader = event->group_leader;
    struct pmu_hw_events fake_pmu;
    DECLARE_BITMAP(fake_used_mask, ARMPMU_MAX_HWEVENTS);
    /*
     * Initialise the fake PMU. We only need to populate the
     * used_mask for the purposes of validation.
     */
    memset(fake_used_mask, 0, sizeof(fake_used_mask));
    fake_pmu.used_mask = fake_used_mask;
    if (!validate_event(&fake_pmu, leader))
        return -EINVAL;
    list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
        if (!validate_event(&fake_pmu, sibling))
            return -EINVAL;
    }
    if (!validate_event(&fake_pmu, event))
        return -EINVAL;
    return 0;
}

static void
armpmu_release_hardware(struct arm_pmu *armpmu)
{
    int i, irq, irqs;
    struct platform_device *pmu_device = armpmu->plat_device;
    irqs = min(pmu_device->num_resources, num_possible_cpus());
    for (i = 0; i < irqs; ++i) {
        if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs))
            continue;
        irq = platform_get_irq(pmu_device, i);
        if (irq >= 0)
            free_irq(irq, armpmu);
    }
}

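/*
 * Claim the PMU interrupts described by the platform device: one IRQ per
 * CPU where possible, each bound to its CPU via IRQ affinity before it is
 * requested.
 */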
static int
armpmu_reserve_hardware(struct arm_pmu *armpmu)
{
    int i, err, irq, irqs;
    struct platform_device *pmu_device = armpmu->plat_device;
    if (!pmu_device) {
        pr_err("no PMU device registered\n");
        return -ENODEV;
    }
    irqs = min(pmu_device->num_resources, num_possible_cpus());
    if (irqs < 1) {
        pr_err("no irqs for PMUs defined\n");
        return -ENODEV;
    }
    for (i = 0; i < irqs; ++i) {
        err = 0;
        irq = platform_get_irq(pmu_device, i);
        if (irq < 0)
            continue;
        /*
         * If we have a single PMU interrupt that we can't shift,
         * assume that we're running on a uniprocessor machine and
         * continue. Otherwise, continue without this interrupt.
         */
        if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
            pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
                       irq, i);
            continue;
        }
        err = request_irq(irq, armpmu->handle_irq,
                          IRQF_NOBALANCING,
                          "arm-pmu", armpmu);
        if (err) {
            pr_err("unable to request IRQ%d for ARM PMU counters\n",
                   irq);
            armpmu_release_hardware(armpmu);
            return err;
        }
        cpumask_set_cpu(i, &armpmu->active_irqs);
    }
    return 0;
}

static void
hw_perf_event_destroy(struct perf_event *event)
{
    struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
    atomic_t *active_events = &armpmu->active_events;
    struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;
    if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
        armpmu_release_hardware(armpmu);
        mutex_unlock(pmu_reserve_mutex);
    }
}

static int
event_requires_mode_exclusion(struct perf_event_attr *attr)
{
    return attr->exclude_idle || attr->exclude_user ||
           attr->exclude_kernel || attr->exclude_hv;
}

static int
__hw_perf_event_init(struct perf_event *event)
{
    struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
    struct hw_perf_event *hwc = &event->hw;
    int mapping, err;
    mapping = armpmu->map_event(event);
    if (mapping < 0) {
        pr_debug("event %x:%llx not supported\n", event->attr.type,
                 event->attr.config);
        return mapping;
    }
    /*
     * We don't assign an index until we actually place the event onto
     * hardware. Use -1 to signify that we haven't decided where to put it
     * yet. For SMP systems, each core has its own PMU so we can't do any
     * clever allocation or constraints checking at this point.
     */
    hwc->idx = -1;
    hwc->config_base = 0;
    hwc->config = 0;
    hwc->event_base = 0;
    /*
     * Check whether we need to exclude the counter from certain modes.
     */
    if ((!armpmu->set_event_filter ||
         armpmu->set_event_filter(hwc, &event->attr)) &&
        event_requires_mode_exclusion(&event->attr)) {
        pr_debug("ARM performance counters do not support mode exclusion\n");
        return -EPERM;
    }
    /*
     * Store the event encoding into the config_base field.
     */
    hwc->config_base |= (unsigned long)mapping;
    if (!hwc->sample_period) {
        /*
         * For non-sampling runs, limit the sample_period to half
         * of the counter width. That way, the new counter value
         * is far less likely to overtake the previous one unless
         * you have some serious IRQ latency issues.
         */
        hwc->sample_period = armpmu->max_period >> 1;
        hwc->last_period = hwc->sample_period;
        local64_set(&hwc->period_left, hwc->sample_period);
    }
    err = 0;
    if (event->group_leader != event) {
        err = validate_group(event);
        if (err)
            return -EINVAL;
    }
    return err;
}

static int armpmu_event_init(struct perf_event *event)
{
    struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
    int err = 0;
    atomic_t *active_events = &armpmu->active_events;
    if (armpmu->map_event(event) == -ENOENT)
        return -ENOENT;
    event->destroy = hw_perf_event_destroy;
    if (!atomic_inc_not_zero(active_events)) {
        mutex_lock(&armpmu->reserve_mutex);
        if (atomic_read(active_events) == 0)
            err = armpmu_reserve_hardware(armpmu);
        if (!err)
            atomic_inc(active_events);
        mutex_unlock(&armpmu->reserve_mutex);
    }
    if (err)
        return err;
    err = __hw_perf_event_init(event);
    if (err)
        hw_perf_event_destroy(event);
    return err;
}

static void armpmu_enable(struct pmu *pmu)
{
    struct arm_pmu *armpmu = to_arm_pmu(pmu);
    struct pmu_hw_events *hw_events = armpmu->get_hw_events();
    int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
    if (enabled)
        armpmu->start();
}

static void armpmu_disable(struct pmu *pmu)
{
    struct arm_pmu *armpmu = to_arm_pmu(pmu);
    armpmu->stop();
}

static void __init armpmu_init(struct arm_pmu *armpmu)
{
    atomic_set(&armpmu->active_events, 0);
    mutex_init(&armpmu->reserve_mutex);
    armpmu->pmu = (struct pmu) {
        .pmu_enable = armpmu_enable,
        .pmu_disable = armpmu_disable,
        .event_init = armpmu_event_init,
        .add = armpmu_add,
        .del = armpmu_del,
        .start = armpmu_start,
        .stop = armpmu_stop,
        .read = armpmu_read,
    };
}

int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type)
{
    armpmu_init(armpmu);
    return perf_pmu_register(&armpmu->pmu, name, type);
}

/*
 * ARMv8 PMUv3 Performance Events handling code.
 * Common event types.
 */
enum armv8_pmuv3_perf_types {
    /* Required events. */
    ARMV8_PMUV3_PERFCTR_PMNC_SW_INCR = 0x00,
    ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL = 0x03,
    ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS = 0x04,
    ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED = 0x10,
    ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES = 0x11,
    ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED = 0x12,
    /* At least one of the following is required. */
    ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED = 0x08,
    ARMV8_PMUV3_PERFCTR_OP_SPEC = 0x1B,
    /* Common architectural events. */
    ARMV8_PMUV3_PERFCTR_MEM_READ = 0x06,
    ARMV8_PMUV3_PERFCTR_MEM_WRITE = 0x07,
    ARMV8_PMUV3_PERFCTR_EXC_TAKEN = 0x09,
    ARMV8_PMUV3_PERFCTR_EXC_EXECUTED = 0x0A,
    ARMV8_PMUV3_PERFCTR_CID_WRITE = 0x0B,
    ARMV8_PMUV3_PERFCTR_PC_WRITE = 0x0C,
    ARMV8_PMUV3_PERFCTR_PC_IMM_BRANCH = 0x0D,
    ARMV8_PMUV3_PERFCTR_PC_PROC_RETURN = 0x0E,
    ARMV8_PMUV3_PERFCTR_MEM_UNALIGNED_ACCESS = 0x0F,
    ARMV8_PMUV3_PERFCTR_TTBR_WRITE = 0x1C,
    /* Common microarchitectural events. */
    ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL = 0x01,
    ARMV8_PMUV3_PERFCTR_ITLB_REFILL = 0x02,
    ARMV8_PMUV3_PERFCTR_DTLB_REFILL = 0x05,
    ARMV8_PMUV3_PERFCTR_MEM_ACCESS = 0x13,
    ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS = 0x14,
    ARMV8_PMUV3_PERFCTR_L1_DCACHE_WB = 0x15,
    ARMV8_PMUV3_PERFCTR_L2_CACHE_ACCESS = 0x16,
    ARMV8_PMUV3_PERFCTR_L2_CACHE_REFILL = 0x17,
    ARMV8_PMUV3_PERFCTR_L2_CACHE_WB = 0x18,
    ARMV8_PMUV3_PERFCTR_BUS_ACCESS = 0x19,
    ARMV8_PMUV3_PERFCTR_MEM_ERROR = 0x1A,
    ARMV8_PMUV3_PERFCTR_BUS_CYCLES = 0x1D,
};

/* PMUv3 HW events mapping. */
static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
    [PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
    [PERF_COUNT_HW_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
    [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
    [PERF_COUNT_HW_CACHE_MISSES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
    [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = HW_OP_UNSUPPORTED,
    [PERF_COUNT_HW_BRANCH_MISSES] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
    [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
    [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
    [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
};

static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                                [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
    [C(L1D)] = {
        [C(OP_READ)] = {
            [C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
            [C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
        },
        [C(OP_WRITE)] = {
            [C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
            [C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
        },
        [C(OP_PREFETCH)] = {
            [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
            [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
        },
    },
    [C(L1I)] = {
        [C(OP_READ)] = {
            [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
            [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
        },
        [C(OP_WRITE)] = {
            [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
            [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
        },
        [C(OP_PREFETCH)] = {
            [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
            [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
        },
    },
    [C(LL)] = {
        [C(OP_READ)] = {
            [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
            [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
        },
        [C(OP_WRITE)] = {
            [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
            [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
        },
        [C(OP_PREFETCH)] = {
            [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
            [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
        },
    },
    [C(DTLB)] = {
        [C(OP_READ)] = {
            [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
            [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
        },
        [C(OP_WRITE)] = {
            [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
            [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
        },
        [C(OP_PREFETCH)] = {
            [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
            [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
        },
    },
    [C(ITLB)] = {
        [C(OP_READ)] = {
            [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
            [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
        },
        [C(OP_WRITE)] = {
            [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
            [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
        },
        [C(OP_PREFETCH)] = {
            [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
            [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
        },
    },
    [C(BPU)] = {
        [C(OP_READ)] = {
            [C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
            [C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
        },
        [C(OP_WRITE)] = {
            [C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
            [C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
        },
        [C(OP_PREFETCH)] = {
            [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
            [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
        },
    },
    [C(NODE)] = {
        [C(OP_READ)] = {
            [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
            [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
        },
        [C(OP_WRITE)] = {
            [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
            [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
        },
        [C(OP_PREFETCH)] = {
            [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
            [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
        },
    },
};

/*
 * Perf Events' indices
 */
#define ARMV8_IDX_CYCLE_COUNTER 0
#define ARMV8_IDX_COUNTER0 1
#define ARMV8_IDX_COUNTER_LAST (ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
#define ARMV8_MAX_COUNTERS 32
#define ARMV8_COUNTER_MASK (ARMV8_MAX_COUNTERS - 1)

/*
 * ARMv8 low level PMU access
 */

/*
 * Perf Event to low level counters mapping
 */
#define ARMV8_IDX_TO_COUNTER(x) \
    (((x) - ARMV8_IDX_COUNTER0) & ARMV8_COUNTER_MASK)

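/*
 * Note: perf index 0 is the cycle counter and indices 1..N are the event
 * counters. ARMV8_IDX_TO_COUNTER() converts a perf index into the hardware
 * counter number: event counters map to 0..N-1, while the cycle counter
 * wraps around to 31, which is its bit position in the enable/overflow
 * registers.
 */
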
/*
 * Per-CPU PMCR: config reg
 */
#define ARMV8_PMCR_E (1 << 0) /* Enable all counters */
#define ARMV8_PMCR_P (1 << 1) /* Reset all counters */
#define ARMV8_PMCR_C (1 << 2) /* Cycle counter reset */
#define ARMV8_PMCR_D (1 << 3) /* CCNT counts every 64th cpu cycle */
#define ARMV8_PMCR_X (1 << 4) /* Export to ETM */
#define ARMV8_PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug */
#define ARMV8_PMCR_N_SHIFT 11 /* Number of counters supported */
#define ARMV8_PMCR_N_MASK 0x1f
#define ARMV8_PMCR_MASK 0x3f /* Mask for writable bits */

/*
 * PMOVSR: counters overflow flag status reg
 */
#define ARMV8_OVSR_MASK 0xffffffff /* Mask for writable bits */
#define ARMV8_OVERFLOWED_MASK ARMV8_OVSR_MASK

/*
 * PMXEVTYPER: Event selection reg
 */
#define ARMV8_EVTYPE_MASK 0xc00000ff /* Mask for writable bits */
#define ARMV8_EVTYPE_EVENT 0xff /* Mask for EVENT bits */

/*
 * Event filters for PMUv3
 */
#define ARMV8_EXCLUDE_EL1 (1 << 31)
#define ARMV8_EXCLUDE_EL0 (1 << 30)
#define ARMV8_INCLUDE_EL2 (1 << 27)

static inline u32 armv8pmu_pmcr_read(void)
{
    u32 val;
    asm volatile("mrs %0, pmcr_el0" : "=r" (val));
    return val;
}

static inline void armv8pmu_pmcr_write(u32 val)
{
    val &= ARMV8_PMCR_MASK;
    isb();
    asm volatile("msr pmcr_el0, %0" :: "r" (val));
}

static inline int armv8pmu_has_overflowed(u32 pmovsr)
{
    return pmovsr & ARMV8_OVERFLOWED_MASK;
}

static inline int armv8pmu_counter_valid(int idx)
{
    return idx >= ARMV8_IDX_CYCLE_COUNTER && idx <= ARMV8_IDX_COUNTER_LAST;
}

static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
{
    int ret = 0;
    u32 counter;
    if (!armv8pmu_counter_valid(idx)) {
        pr_err("CPU%u checking wrong counter %d overflow status\n",
               smp_processor_id(), idx);
    } else {
        counter = ARMV8_IDX_TO_COUNTER(idx);
        ret = pmnc & BIT(counter);
    }
    return ret;
}

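/*
 * The ARMv8 event counters are accessed indirectly: a counter is first
 * selected via PMSELR_EL0, after which PMXEVCNTR_EL0 and PMXEVTYPER_EL0
 * refer to the selected counter. The cycle counter has its own register
 * (PMCCNTR_EL0) and never needs selecting.
 */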
static inline int armv8pmu_select_counter(int idx)
{
    u32 counter;
    if (!armv8pmu_counter_valid(idx)) {
        pr_err("CPU%u selecting wrong PMNC counter %d\n",
               smp_processor_id(), idx);
        return -EINVAL;
    }
    counter = ARMV8_IDX_TO_COUNTER(idx);
    asm volatile("msr pmselr_el0, %0" :: "r" (counter));
    isb();
    return idx;
}

static inline u32 armv8pmu_read_counter(int idx)
{
    u32 value = 0;
    if (!armv8pmu_counter_valid(idx))
        pr_err("CPU%u reading wrong counter %d\n",
               smp_processor_id(), idx);
    else if (idx == ARMV8_IDX_CYCLE_COUNTER)
        asm volatile("mrs %0, pmccntr_el0" : "=r" (value));
    else if (armv8pmu_select_counter(idx) == idx)
        asm volatile("mrs %0, pmxevcntr_el0" : "=r" (value));
    return value;
}

static inline void armv8pmu_write_counter(int idx, u32 value)
{
    if (!armv8pmu_counter_valid(idx))
        pr_err("CPU%u writing wrong counter %d\n",
               smp_processor_id(), idx);
    else if (idx == ARMV8_IDX_CYCLE_COUNTER)
        asm volatile("msr pmccntr_el0, %0" :: "r" (value));
    else if (armv8pmu_select_counter(idx) == idx)
        asm volatile("msr pmxevcntr_el0, %0" :: "r" (value));
}

static inline void armv8pmu_write_evtype(int idx, u32 val)
{
    if (armv8pmu_select_counter(idx) == idx) {
        val &= ARMV8_EVTYPE_MASK;
        asm volatile("msr pmxevtyper_el0, %0" :: "r" (val));
    }
}

static inline int armv8pmu_enable_counter(int idx)
{
    u32 counter;
    if (!armv8pmu_counter_valid(idx)) {
        pr_err("CPU%u enabling wrong PMNC counter %d\n",
               smp_processor_id(), idx);
        return -EINVAL;
    }
    counter = ARMV8_IDX_TO_COUNTER(idx);
    asm volatile("msr pmcntenset_el0, %0" :: "r" (BIT(counter)));
    return idx;
}

static inline int armv8pmu_disable_counter(int idx)
{
    u32 counter;
    if (!armv8pmu_counter_valid(idx)) {
        pr_err("CPU%u disabling wrong PMNC counter %d\n",
               smp_processor_id(), idx);
        return -EINVAL;
    }
    counter = ARMV8_IDX_TO_COUNTER(idx);
    asm volatile("msr pmcntenclr_el0, %0" :: "r" (BIT(counter)));
    return idx;
}

static inline int armv8pmu_enable_intens(int idx)
{
    u32 counter;
    if (!armv8pmu_counter_valid(idx)) {
        pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
               smp_processor_id(), idx);
        return -EINVAL;
    }
    counter = ARMV8_IDX_TO_COUNTER(idx);
    asm volatile("msr pmintenset_el1, %0" :: "r" (BIT(counter)));
    return idx;
}

static inline int armv8pmu_disable_intens(int idx)
{
    u32 counter;
    if (!armv8pmu_counter_valid(idx)) {
        pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
               smp_processor_id(), idx);
        return -EINVAL;
    }
    counter = ARMV8_IDX_TO_COUNTER(idx);
    asm volatile("msr pmintenclr_el1, %0" :: "r" (BIT(counter)));
    isb();
    /* Clear the overflow flag in case an interrupt is pending. */
    asm volatile("msr pmovsclr_el0, %0" :: "r" (BIT(counter)));
    isb();
    return idx;
}

static inline u32 armv8pmu_getreset_flags(void)
{
    u32 value;
    /* Read */
    asm volatile("mrs %0, pmovsclr_el0" : "=r" (value));
    /* Write to clear flags */
    value &= ARMV8_OVSR_MASK;
    asm volatile("msr pmovsclr_el0, %0" :: "r" (value));
    return value;
}

static void armv8pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
    unsigned long flags;
    struct pmu_hw_events *events = cpu_pmu->get_hw_events();
    /*
     * Enable counter and interrupt, and set the counter to count
     * the event that we're interested in.
     */
    raw_spin_lock_irqsave(&events->pmu_lock, flags);
    /*
     * Disable counter
     */
    armv8pmu_disable_counter(idx);
    /*
     * Set event (if destined for PMNx counters).
     */
    armv8pmu_write_evtype(idx, hwc->config_base);
    /*
     * Enable interrupt for this counter
     */
    armv8pmu_enable_intens(idx);
    /*
     * Enable counter
     */
    armv8pmu_enable_counter(idx);
    raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv8pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
    unsigned long flags;
    struct pmu_hw_events *events = cpu_pmu->get_hw_events();
    /*
     * Disable counter and interrupt
     */
    raw_spin_lock_irqsave(&events->pmu_lock, flags);
    /*
     * Disable counter
     */
    armv8pmu_disable_counter(idx);
    /*
     * Disable interrupt for this counter
     */
    armv8pmu_disable_intens(idx);
    raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

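/*
 * All counters share a single overflow interrupt. The handler reads and
 * clears the overflow flags, then updates and re-programs every event whose
 * counter actually overflowed, disabling an event if the core perf code
 * asks for it to be throttled.
 */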
static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
{
    u32 pmovsr;
    struct perf_sample_data data;
    struct pmu_hw_events *cpuc;
    struct pt_regs *regs;
    int idx;
    /*
     * Get and reset the IRQ flags
     */
    pmovsr = armv8pmu_getreset_flags();
    /*
     * Did an overflow occur?
     */
    if (!armv8pmu_has_overflowed(pmovsr))
        return IRQ_NONE;
    /*
     * Handle the counter(s) overflow(s)
     */
    regs = get_irq_regs();
    cpuc = &__get_cpu_var(cpu_hw_events);
    for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
        struct perf_event *event = cpuc->events[idx];
        struct hw_perf_event *hwc;
        /* Ignore if we don't have an event. */
        if (!event)
            continue;
        /*
         * We have a single interrupt for all counters. Check that
         * each counter has overflowed before we process it.
         */
        if (!armv8pmu_counter_has_overflowed(pmovsr, idx))
            continue;
        hwc = &event->hw;
        armpmu_event_update(event, hwc, idx);
        perf_sample_data_init(&data, 0, hwc->last_period);
        if (!armpmu_event_set_period(event, hwc, idx))
            continue;
        if (perf_event_overflow(event, &data, regs))
            cpu_pmu->disable(hwc, idx);
    }
    /*
     * Handle the pending perf events.
     *
     * Note: this call *must* be run with interrupts disabled. For
     * platforms that can have the PMU interrupts raised as an NMI, this
     * will not work.
     */
    irq_work_run();
    return IRQ_HANDLED;
}

static void armv8pmu_start(void)
{
    unsigned long flags;
    struct pmu_hw_events *events = cpu_pmu->get_hw_events();
    raw_spin_lock_irqsave(&events->pmu_lock, flags);
    /* Enable all counters */
    armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMCR_E);
    raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv8pmu_stop(void)
{
    unsigned long flags;
    struct pmu_hw_events *events = cpu_pmu->get_hw_events();
    raw_spin_lock_irqsave(&events->pmu_lock, flags);
    /* Disable all counters */
    armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMCR_E);
    raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

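/*
 * Counter allocation: the architected cycle counter is reserved for the
 * CPU cycles event; every other event takes the first free event counter
 * found in used_mask.
 */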
static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
                                  struct hw_perf_event *event)
{
    int idx;
    unsigned long evtype = event->config_base & ARMV8_EVTYPE_EVENT;
    /* Always place a cycle counter into the cycle counter. */
    if (evtype == ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES) {
        if (test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
            return -EAGAIN;
        return ARMV8_IDX_CYCLE_COUNTER;
    }
    /*
     * For anything other than a cycle counter, try and use
     * the event counters.
     */
    for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
        if (!test_and_set_bit(idx, cpuc->used_mask))
            return idx;
    }
    /* The counters are all in use. */
    return -EAGAIN;
}

/*
 * Add an event filter to a given event. This will only work for PMUv3 PMUs.
 */
static int armv8pmu_set_event_filter(struct hw_perf_event *event,
                                     struct perf_event_attr *attr)
{
    unsigned long config_base = 0;
    if (attr->exclude_idle)
        return -EPERM;
    if (attr->exclude_user)
        config_base |= ARMV8_EXCLUDE_EL0;
    if (attr->exclude_kernel)
        config_base |= ARMV8_EXCLUDE_EL1;
    if (!attr->exclude_hv)
        config_base |= ARMV8_INCLUDE_EL2;
    /*
     * Install the filter into config_base as this is used to
     * construct the event type.
     */
    event->config_base = config_base;
    return 0;
}

static void armv8pmu_reset(void *info)
{
    u32 idx, nb_cnt = cpu_pmu->num_events;
    /* The counter and interrupt enable registers are unknown at reset. */
    for (idx = ARMV8_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx)
        armv8pmu_disable_event(NULL, idx);
    /* Initialize & Reset PMNC: C and P bits. */
    armv8pmu_pmcr_write(ARMV8_PMCR_P | ARMV8_PMCR_C);
    /* Disable access from userspace. */
    asm volatile("msr pmuserenr_el0, %0" :: "r" (0));
}

static int armv8_pmuv3_map_event(struct perf_event *event)
{
    return map_cpu_event(event, &armv8_pmuv3_perf_map,
                         &armv8_pmuv3_perf_cache_map, 0xFF);
}

static struct arm_pmu armv8pmu = {
    .handle_irq = armv8pmu_handle_irq,
    .enable = armv8pmu_enable_event,
    .disable = armv8pmu_disable_event,
    .read_counter = armv8pmu_read_counter,
    .write_counter = armv8pmu_write_counter,
    .get_event_idx = armv8pmu_get_event_idx,
    .start = armv8pmu_start,
    .stop = armv8pmu_stop,
    .reset = armv8pmu_reset,
    .max_period = (1LLU << 32) - 1,
};

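/*
 * The number of implemented event counters is advertised in the PMCR_EL0.N
 * field; the cycle counter is always present on top of that.
 */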
static u32 __init armv8pmu_read_num_pmnc_events(void)
{
    u32 nb_cnt;
    /* Read the number of CNTx counters supported from PMNC. */
    nb_cnt = (armv8pmu_pmcr_read() >> ARMV8_PMCR_N_SHIFT) & ARMV8_PMCR_N_MASK;
    /* Add the CPU cycles counter and return. */
    return nb_cnt + 1;
}

static struct arm_pmu *__init armv8_pmuv3_pmu_init(void)
{
    armv8pmu.name = "arm/armv8-pmuv3";
    armv8pmu.map_event = armv8_pmuv3_map_event;
    armv8pmu.num_events = armv8pmu_read_num_pmnc_events();
    armv8pmu.set_event_filter = armv8pmu_set_event_filter;
    return &armv8pmu;
}

/*
 * Ensure the PMU has sane values out of reset.
 * This requires SMP to be available, so exists as a separate initcall.
 */
static int __init
cpu_pmu_reset(void)
{
    if (cpu_pmu && cpu_pmu->reset)
        return on_each_cpu(cpu_pmu->reset, NULL, 1);
    return 0;
}
arch_initcall(cpu_pmu_reset);

/*
 * PMU platform driver and devicetree bindings.
 */
static struct of_device_id armpmu_of_device_ids[] = {
    {.compatible = "arm,armv8-pmuv3"},
    {},
};

static int armpmu_device_probe(struct platform_device *pdev)
{
    if (!cpu_pmu)
        return -ENODEV;
    cpu_pmu->plat_device = pdev;
    return 0;
}

static struct platform_driver armpmu_driver = {
    .driver = {
        .name = "arm-pmu",
        .of_match_table = armpmu_of_device_ids,
    },
    .probe = armpmu_device_probe,
};

static int __init register_pmu_driver(void)
{
    return platform_driver_register(&armpmu_driver);
}
device_initcall(register_pmu_driver);

static struct pmu_hw_events *armpmu_get_cpu_events(void)
{
    return &__get_cpu_var(cpu_hw_events);
}

static void __init cpu_pmu_init(struct arm_pmu *armpmu)
{
    int cpu;
    for_each_possible_cpu(cpu) {
        struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
        events->events = per_cpu(hw_events, cpu);
        events->used_mask = per_cpu(used_mask, cpu);
        raw_spin_lock_init(&events->pmu_lock);
    }
    armpmu->get_hw_events = armpmu_get_cpu_events;
}

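/*
 * Probe for a PMU at boot: the PMUVer field of ID_AA64DFR0_EL1 (bits
 * [11:8]) reads 0x1 when an ARMv8 PMUv3 implementation is present.
 */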
static int __init init_hw_perf_events(void)
{
    u64 dfr = read_cpuid(ID_AA64DFR0_EL1);
    switch ((dfr >> 8) & 0xf) {
    case 0x1: /* PMUv3 */
        cpu_pmu = armv8_pmuv3_pmu_init();
        break;
    }
    if (cpu_pmu) {
        pr_info("enabled with %s PMU driver, %d counters available\n",
                cpu_pmu->name, cpu_pmu->num_events);
        cpu_pmu_init(cpu_pmu);
        armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW);
    } else {
        pr_info("no hardware support available\n");
    }
    return 0;
}
early_initcall(init_hw_perf_events);

/*
 * Callchain handling code.
 */
struct frame_tail {
    struct frame_tail __user *fp;
    unsigned long lr;
} __attribute__((packed));

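/*
 * The AArch64 frame record pointed to by x29 is a pair of {previous fp,
 * return address}, which is what struct frame_tail above mirrors for the
 * userspace unwinder.
 */
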
/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 */
static struct frame_tail __user *
user_backtrace(struct frame_tail __user *tail,
               struct perf_callchain_entry *entry)
{
    struct frame_tail buftail;
    unsigned long err;
    /* Also check accessibility of one struct frame_tail beyond */
    if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
        return NULL;
    pagefault_disable();
    err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
    pagefault_enable();
    if (err)
        return NULL;
    perf_callchain_store(entry, buftail.lr);
    /*
     * Frame pointers should strictly progress back up the stack
     * (towards higher addresses).
     */
    if (tail >= buftail.fp)
        return NULL;
    return buftail.fp;
}

void perf_callchain_user(struct perf_callchain_entry *entry,
                         struct pt_regs *regs)
{
    struct frame_tail __user *tail;
    if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
        /* We don't support guest os callchain now */
        return;
    }
    tail = (struct frame_tail __user *)regs->regs[29];
    while (entry->nr < PERF_MAX_STACK_DEPTH &&
           tail && !((unsigned long)tail & 0xf))
        tail = user_backtrace(tail, entry);
}

/*
 * Gets called by walk_stackframe() for every stackframe. This will be called
 * whilst unwinding the stackframe and is like a subroutine return so we use
 * the PC.
 */
static int callchain_trace(struct stackframe *frame, void *data)
{
    struct perf_callchain_entry *entry = data;
    perf_callchain_store(entry, frame->pc);
    return 0;
}

void perf_callchain_kernel(struct perf_callchain_entry *entry,
                           struct pt_regs *regs)
{
    struct stackframe frame;
    if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
        /* We don't support guest os callchain now */
        return;
    }
    frame.fp = regs->regs[29];
    frame.sp = regs->sp;
    frame.pc = regs->pc;
    walk_stackframe(&frame, callchain_trace, entry);
}

unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
    if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
        return perf_guest_cbs->get_guest_ip();
    return instruction_pointer(regs);
}

unsigned long perf_misc_flags(struct pt_regs *regs)
{
    int misc = 0;
    if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
        if (perf_guest_cbs->is_user_mode())
            misc |= PERF_RECORD_MISC_GUEST_USER;
        else
            misc |= PERF_RECORD_MISC_GUEST_KERNEL;
    } else {
        if (user_mode(regs))
            misc |= PERF_RECORD_MISC_USER;
        else
            misc |= PERF_RECORD_MISC_KERNEL;
    }
    return misc;
}