perf_event_v7.c

/*
 * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
 *
 * ARMv7 support: Jean Pihet <jpihet@mvista.com>
 * 2010 (c) MontaVista Software, LLC.
 *
 * Copied from ARMv6 code, with the low level code inspired
 * by the ARMv7 Oprofile code.
 *
 * Cortex-A8 has up to 4 configurable performance counters and
 * a single cycle counter.
 * Cortex-A9 has up to 31 configurable performance counters and
 * a single cycle counter.
 *
 * All counters can be enabled/disabled and IRQ masked separately. The cycle
 * counter and all 4 performance counters together can be reset separately.
 */

#ifdef CONFIG_CPU_V7

/*
 * Common ARMv7 event types
 *
 * Note: An implementation may not be able to count all of these events
 * but the encodings are considered to be `reserved' in the case that
 * they are not available.
 */
enum armv7_perf_types {
	ARMV7_PERFCTR_PMNC_SW_INCR = 0x00,
	ARMV7_PERFCTR_L1_ICACHE_REFILL = 0x01,
	ARMV7_PERFCTR_ITLB_REFILL = 0x02,
	ARMV7_PERFCTR_L1_DCACHE_REFILL = 0x03,
	ARMV7_PERFCTR_L1_DCACHE_ACCESS = 0x04,
	ARMV7_PERFCTR_DTLB_REFILL = 0x05,
	ARMV7_PERFCTR_MEM_READ = 0x06,
	ARMV7_PERFCTR_MEM_WRITE = 0x07,
	ARMV7_PERFCTR_INSTR_EXECUTED = 0x08,
	ARMV7_PERFCTR_EXC_TAKEN = 0x09,
	ARMV7_PERFCTR_EXC_EXECUTED = 0x0A,
	ARMV7_PERFCTR_CID_WRITE = 0x0B,

	/*
	 * ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
	 * It counts:
	 *  - all (taken) branch instructions,
	 *  - instructions that explicitly write the PC,
	 *  - exception generating instructions.
	 */
	ARMV7_PERFCTR_PC_WRITE = 0x0C,
	ARMV7_PERFCTR_PC_IMM_BRANCH = 0x0D,
	ARMV7_PERFCTR_PC_PROC_RETURN = 0x0E,
	ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS = 0x0F,
	ARMV7_PERFCTR_PC_BRANCH_MIS_PRED = 0x10,
	ARMV7_PERFCTR_CLOCK_CYCLES = 0x11,
	ARMV7_PERFCTR_PC_BRANCH_PRED = 0x12,

	/* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */
	ARMV7_PERFCTR_MEM_ACCESS = 0x13,
	ARMV7_PERFCTR_L1_ICACHE_ACCESS = 0x14,
	ARMV7_PERFCTR_L1_DCACHE_WB = 0x15,
	ARMV7_PERFCTR_L2_CACHE_ACCESS = 0x16,
	ARMV7_PERFCTR_L2_CACHE_REFILL = 0x17,
	ARMV7_PERFCTR_L2_CACHE_WB = 0x18,
	ARMV7_PERFCTR_BUS_ACCESS = 0x19,
	ARMV7_PERFCTR_MEM_ERROR = 0x1A,
	ARMV7_PERFCTR_INSTR_SPEC = 0x1B,
	ARMV7_PERFCTR_TTBR_WRITE = 0x1C,
	ARMV7_PERFCTR_BUS_CYCLES = 0x1D,

	ARMV7_PERFCTR_CPU_CYCLES = 0xFF
};

/* ARMv7 Cortex-A8 specific event types */
enum armv7_a8_perf_types {
	ARMV7_A8_PERFCTR_L2_CACHE_ACCESS = 0x43,
	ARMV7_A8_PERFCTR_L2_CACHE_REFILL = 0x44,
	ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS = 0x50,
	ARMV7_A8_PERFCTR_STALL_ISIDE = 0x56,
};

/* ARMv7 Cortex-A9 specific event types */
enum armv7_a9_perf_types {
	ARMV7_A9_PERFCTR_INSTR_CORE_RENAME = 0x68,
	ARMV7_A9_PERFCTR_STALL_ICACHE = 0x60,
	ARMV7_A9_PERFCTR_STALL_DISPATCH = 0x66,
};

/* ARMv7 Cortex-A5 specific event types */
enum armv7_a5_perf_types {
	ARMV7_A5_PERFCTR_PREFETCH_LINEFILL = 0xc2,
	ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP = 0xc3,
};

/* ARMv7 Cortex-A15 specific event types */
enum armv7_a15_perf_types {
	ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ = 0x40,
	ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE = 0x41,
	ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ = 0x42,
	ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE = 0x43,

	ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ = 0x4C,
	ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE = 0x4D,

	ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ = 0x50,
	ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE = 0x51,
	ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ = 0x52,
	ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE = 0x53,

	ARMV7_A15_PERFCTR_PC_WRITE_SPEC = 0x76,
};

/*
 * Cortex-A8 HW events mapping
 *
 * The hardware events that we support. We do support cache operations but
 * we have harvard caches and no way to combine instruction and data
 * accesses/misses in hardware.
 */
static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A8_PERFCTR_STALL_ISIDE,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
};

static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		/*
		 * The performance counters don't differentiate between read
		 * and write accesses/misses so this isn't strictly correct,
		 * but it's the best we can do. Writes and reads get
		 * combined.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
};

/*
 * Cortex-A9 HW events mapping
 */
static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_A9_PERFCTR_INSTR_CORE_RENAME,
	[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A9_PERFCTR_STALL_ICACHE,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV7_A9_PERFCTR_STALL_DISPATCH,
};

static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		/*
		 * The performance counters don't differentiate between read
		 * and write accesses/misses so this isn't strictly correct,
		 * but it's the best we can do. Writes and reads get
		 * combined.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
};

/*
 * Cortex-A5 HW events mapping
 */
static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
};

static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
			[C(RESULT_MISS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
		},
		/*
		 * The prefetch counters don't differentiate between the I
		 * side and the D side.
		 */
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
			[C(RESULT_MISS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
};

/*
 * Cortex-A15 HW events mapping
 */
static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_A15_PERFCTR_PC_WRITE_SPEC,
	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
};

static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ,
			[C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE,
			[C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		/*
		 * Not all performance counters differentiate between read
		 * and write accesses/misses so we're not always strictly
		 * correct, but it's the best we can do. Writes and reads get
		 * combined in these cases.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ,
			[C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE,
			[C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
};

/*
 * Cortex-A7 HW events mapping
 */
static const unsigned armv7_a7_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
};

static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		/*
		 * The performance counters don't differentiate between read
		 * and write accesses/misses so this isn't strictly correct,
		 * but it's the best we can do. Writes and reads get
		 * combined.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_CACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_CACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
};

/*
 * Perf Events' indices
 */
#define ARMV7_IDX_CYCLE_COUNTER	0
#define ARMV7_IDX_COUNTER0	1
#define ARMV7_IDX_COUNTER_LAST	(ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)

#define ARMV7_MAX_COUNTERS	32
#define ARMV7_COUNTER_MASK	(ARMV7_MAX_COUNTERS - 1)

/*
 * ARMv7 low level PMNC access
 */

/*
 * Perf Event to low level counters mapping
 */
#define ARMV7_IDX_TO_COUNTER(x)	\
	(((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK)
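/*
 * For example, perf idx 1 (ARMV7_IDX_COUNTER0) maps to hardware event
 * counter 0, idx 2 to counter 1, and so on; the cycle counter (idx 0)
 * has its own dedicated registers and never goes through this macro.
 */
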
/*
 * Per-CPU PMNC: config reg
 */
#define ARMV7_PMNC_E		(1 << 0) /* Enable all counters */
#define ARMV7_PMNC_P		(1 << 1) /* Reset all counters */
#define ARMV7_PMNC_C		(1 << 2) /* Cycle counter reset */
#define ARMV7_PMNC_D		(1 << 3) /* CCNT counts every 64th cpu cycle */
#define ARMV7_PMNC_X		(1 << 4) /* Export to ETM */
#define ARMV7_PMNC_DP		(1 << 5) /* Disable CCNT if non-invasive debug */
#define ARMV7_PMNC_N_SHIFT	11	 /* Number of counters supported */
#define ARMV7_PMNC_N_MASK	0x1f
#define ARMV7_PMNC_MASK		0x3f	 /* Mask for writable bits */

/*
 * FLAG: counters overflow flag status reg
 */
#define ARMV7_FLAG_MASK		0xffffffff	/* Mask for writable bits */
#define ARMV7_OVERFLOWED_MASK	ARMV7_FLAG_MASK

/*
 * PMXEVTYPER: Event selection reg
 */
#define ARMV7_EVTYPE_MASK	0xc00000ff	/* Mask for writable bits */
#define ARMV7_EVTYPE_EVENT	0xff		/* Mask for EVENT bits */

/*
 * Event filters for PMUv2
 */
#define ARMV7_EXCLUDE_PL1	(1 << 31)
#define ARMV7_EXCLUDE_USER	(1 << 30)
#define ARMV7_INCLUDE_HYP	(1 << 27)
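/*
 * These filter bits sit in the top bits of PMXEVTYPER (kept writable by
 * ARMV7_EVTYPE_MASK above) and are only programmed when the PMU provides
 * set_event_filter, i.e. the PMUv2 parts (Cortex-A7/A15) further down.
 */
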
static inline u32 armv7_pmnc_read(void)
{
	u32 val;
	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
	return val;
}

static inline void armv7_pmnc_write(u32 val)
{
	val &= ARMV7_PMNC_MASK;
	isb();
	asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
}

static inline int armv7_pmnc_has_overflowed(u32 pmnc)
{
	return pmnc & ARMV7_OVERFLOWED_MASK;
}

static inline int armv7_pmnc_counter_valid(int idx)
{
	return idx >= ARMV7_IDX_CYCLE_COUNTER && idx <= ARMV7_IDX_COUNTER_LAST;
}

static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx)
{
	int ret = 0;
	u32 counter;

	if (!armv7_pmnc_counter_valid(idx)) {
		pr_err("CPU%u checking wrong counter %d overflow status\n",
			smp_processor_id(), idx);
	} else {
		counter = ARMV7_IDX_TO_COUNTER(idx);
		ret = pmnc & BIT(counter);
	}

	return ret;
}

static inline int armv7_pmnc_select_counter(int idx)
{
	u32 counter;

	if (!armv7_pmnc_counter_valid(idx)) {
		pr_err("CPU%u selecting wrong PMNC counter %d\n",
			smp_processor_id(), idx);
		return -EINVAL;
	}

	counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter));
	isb();

	return idx;
}
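
/*
 * After the PMSELR write above, the PMXEVTYPER/PMXEVCNTR accesses that
 * follow operate on the selected counter; the isb() ensures the
 * selection takes effect before those accesses are issued.
 */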

static inline u32 armv7pmu_read_counter(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	u32 value = 0;

	if (!armv7_pmnc_counter_valid(idx))
		pr_err("CPU%u reading wrong counter %d\n",
			smp_processor_id(), idx);
	else if (idx == ARMV7_IDX_CYCLE_COUNTER)
		asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
	else if (armv7_pmnc_select_counter(idx) == idx)
		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (value));

	return value;
}

static inline void armv7pmu_write_counter(struct perf_event *event, u32 value)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (!armv7_pmnc_counter_valid(idx))
		pr_err("CPU%u writing wrong counter %d\n",
			smp_processor_id(), idx);
	else if (idx == ARMV7_IDX_CYCLE_COUNTER)
		asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
	else if (armv7_pmnc_select_counter(idx) == idx)
		asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value));
}

static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
{
	if (armv7_pmnc_select_counter(idx) == idx) {
		val &= ARMV7_EVTYPE_MASK;
		asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
	}
}

static inline int armv7_pmnc_enable_counter(int idx)
{
	u32 counter;

	if (!armv7_pmnc_counter_valid(idx)) {
		pr_err("CPU%u enabling wrong PMNC counter %d\n",
			smp_processor_id(), idx);
		return -EINVAL;
	}

	counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter)));
	return idx;
}

static inline int armv7_pmnc_disable_counter(int idx)
{
	u32 counter;

	if (!armv7_pmnc_counter_valid(idx)) {
		pr_err("CPU%u disabling wrong PMNC counter %d\n",
			smp_processor_id(), idx);
		return -EINVAL;
	}

	counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter)));
	return idx;
}

static inline int armv7_pmnc_enable_intens(int idx)
{
	u32 counter;

	if (!armv7_pmnc_counter_valid(idx)) {
		pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
			smp_processor_id(), idx);
		return -EINVAL;
	}

	counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter)));
	return idx;
}

static inline int armv7_pmnc_disable_intens(int idx)
{
	u32 counter;

	if (!armv7_pmnc_counter_valid(idx)) {
		pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
			smp_processor_id(), idx);
		return -EINVAL;
	}

	counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
	isb();
	/* Clear the overflow flag in case an interrupt is pending. */
	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter)));
	isb();

	return idx;
}

static inline u32 armv7_pmnc_getreset_flags(void)
{
	u32 val;

	/* Read */
	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));

	/* Write to clear flags */
	val &= ARMV7_FLAG_MASK;
	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));

	return val;
}
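
/*
 * The value returned above is the PMOVSR snapshot that the overflow IRQ
 * handler below uses to work out which counters actually fired.
 */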

#ifdef DEBUG
static void armv7_pmnc_dump_regs(void)
{
	u32 val;
	unsigned int cnt;

	printk(KERN_INFO "PMNC registers dump:\n");

	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
	printk(KERN_INFO "PMNC  =0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
	printk(KERN_INFO "CNTENS=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
	printk(KERN_INFO "INTENS=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
	printk(KERN_INFO "FLAGS =0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
	printk(KERN_INFO "SELECT=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
	printk(KERN_INFO "CCNT  =0x%08x\n", val);

	for (cnt = ARMV7_IDX_COUNTER0; cnt <= ARMV7_IDX_COUNTER_LAST; cnt++) {
		armv7_pmnc_select_counter(cnt);
		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
		printk(KERN_INFO "CNT[%d] count =0x%08x\n",
			ARMV7_IDX_TO_COUNTER(cnt), val);
		asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
		printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n",
			ARMV7_IDX_TO_COUNTER(cnt), val);
	}
}
#endif

static void armv7pmu_enable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
	int idx = hwc->idx;

	/*
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Set event (if destined for PMNx counters)
	 * We only need to set the event for the cycle counter if we
	 * have the ability to perform event filtering.
	 */
	if (cpu_pmu->set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
		armv7_pmnc_write_evtsel(idx, hwc->config_base);

	/*
	 * Enable interrupt for this counter
	 */
	armv7_pmnc_enable_intens(idx);

	/*
	 * Enable counter
	 */
	armv7_pmnc_enable_counter(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv7pmu_disable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
	int idx = hwc->idx;

	/*
	 * Disable counter and interrupt
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Disable interrupt for this counter
	 */
	armv7_pmnc_disable_intens(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
{
	u32 pmnc;
	struct perf_sample_data data;
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
	struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events();
	struct pt_regs *regs;
	int idx;

	/*
	 * Get and reset the IRQ flags
	 */
	pmnc = armv7_pmnc_getreset_flags();

	/*
	 * Did an overflow occur?
	 */
	if (!armv7_pmnc_has_overflowed(pmnc))
		return IRQ_NONE;

	/*
	 * Handle the counter(s) overflow(s)
	 */
	regs = get_irq_regs();

	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		/* Ignore if we don't have an event. */
		if (!event)
			continue;

		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event);
		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!armpmu_event_set_period(event))
			continue;

		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(event);
	}

	/*
	 * Handle the pending perf events.
	 *
	 * Note: this call *must* be run with interrupts disabled. For
	 * platforms that can have the PMU interrupts raised as an NMI, this
	 * will not work.
	 */
	irq_work_run();

	return IRQ_HANDLED;
}

static void armv7pmu_start(struct arm_pmu *cpu_pmu)
{
	unsigned long flags;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/* Enable all counters */
	armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv7pmu_stop(struct arm_pmu *cpu_pmu)
{
	unsigned long flags;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/* Disable all counters */
	armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
				  struct perf_event *event)
{
	int idx;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	unsigned long evtype = hwc->config_base & ARMV7_EVTYPE_EVENT;

	/* Always place a cycle counter into the cycle counter. */
	if (evtype == ARMV7_PERFCTR_CPU_CYCLES) {
		if (test_and_set_bit(ARMV7_IDX_CYCLE_COUNTER, cpuc->used_mask))
			return -EAGAIN;

		return ARMV7_IDX_CYCLE_COUNTER;
	}

	/*
	 * For anything other than a cycle counter, try and use
	 * the events counters
	 */
	for (idx = ARMV7_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
		if (!test_and_set_bit(idx, cpuc->used_mask))
			return idx;
	}

	/* The counters are all in use. */
	return -EAGAIN;
}

/*
 * Add an event filter to a given event. This will only work for PMUv2 PMUs.
 */
static int armv7pmu_set_event_filter(struct hw_perf_event *event,
				     struct perf_event_attr *attr)
{
	unsigned long config_base = 0;

	if (attr->exclude_idle)
		return -EPERM;
	if (attr->exclude_user)
		config_base |= ARMV7_EXCLUDE_USER;
	if (attr->exclude_kernel)
		config_base |= ARMV7_EXCLUDE_PL1;
	if (!attr->exclude_hv)
		config_base |= ARMV7_INCLUDE_HYP;

	/*
	 * Install the filter into config_base as this is used to
	 * construct the event type.
	 */
	event->config_base = config_base;

	return 0;
}

static void armv7pmu_reset(void *info)
{
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
	u32 idx, nb_cnt = cpu_pmu->num_events;

	/* The counter and interrupt enable registers are unknown at reset. */
	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
		armv7_pmnc_disable_counter(idx);
		armv7_pmnc_disable_intens(idx);
	}

	/* Initialize & Reset PMNC: C and P bits */
	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
}

static int armv7_a8_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a8_perf_map,
				&armv7_a8_perf_cache_map, 0xFF);
}

static int armv7_a9_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a9_perf_map,
				&armv7_a9_perf_cache_map, 0xFF);
}

static int armv7_a5_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a5_perf_map,
				&armv7_a5_perf_cache_map, 0xFF);
}

static int armv7_a15_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a15_perf_map,
				&armv7_a15_perf_cache_map, 0xFF);
}

static int armv7_a7_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a7_perf_map,
				&armv7_a7_perf_cache_map, 0xFF);
}

static void armv7pmu_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->handle_irq	= armv7pmu_handle_irq;
	cpu_pmu->enable		= armv7pmu_enable_event;
	cpu_pmu->disable	= armv7pmu_disable_event;
	cpu_pmu->read_counter	= armv7pmu_read_counter;
	cpu_pmu->write_counter	= armv7pmu_write_counter;
	cpu_pmu->get_event_idx	= armv7pmu_get_event_idx;
	cpu_pmu->start		= armv7pmu_start;
	cpu_pmu->stop		= armv7pmu_stop;
	cpu_pmu->reset		= armv7pmu_reset;
	cpu_pmu->max_period	= (1LLU << 32) - 1;
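	/*
	 * All ARMv7 PMU counters, including the cycle counter, are 32 bits
	 * wide, hence the 2^32 - 1 maximum period above.
	 */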
}

static u32 __devinit armv7_read_num_pmnc_events(void)
{
	u32 nb_cnt;

	/* Read the nb of CNTx counters supported from PMNC */
	nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;

	/* Add the CPU cycles counter and return */
	return nb_cnt + 1;
}

static int __devinit armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "ARMv7 Cortex-A8";
	cpu_pmu->map_event	= armv7_a8_map_event;
	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
	return 0;
}

static int __devinit armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "ARMv7 Cortex-A9";
	cpu_pmu->map_event	= armv7_a9_map_event;
	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
	return 0;
}

static int __devinit armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "ARMv7 Cortex-A5";
	cpu_pmu->map_event	= armv7_a5_map_event;
	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
	return 0;
}

static int __devinit armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "ARMv7 Cortex-A15";
	cpu_pmu->map_event	= armv7_a15_map_event;
	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
	return 0;
}

static int __devinit armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "ARMv7 Cortex-A7";
	cpu_pmu->map_event	= armv7_a7_map_event;
	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
	return 0;
}
#else
static inline int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}

static inline int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}

static inline int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}

static inline int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}

static inline int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}
#endif	/* CONFIG_CPU_V7 */