perf_event_v7.c 36 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311
  1. /*
  2. * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
  3. *
  4. * ARMv7 support: Jean Pihet <jpihet@mvista.com>
  5. * 2010 (c) MontaVista Software, LLC.
  6. *
  7. * Copied from ARMv6 code, with the low level code inspired
  8. * by the ARMv7 Oprofile code.
  9. *
  10. * Cortex-A8 has up to 4 configurable performance counters and
  11. * a single cycle counter.
  12. * Cortex-A9 has up to 31 configurable performance counters and
  13. * a single cycle counter.
  14. *
  15. * All counters can be enabled/disabled and IRQ masked separately. The cycle
  16. * counter and all 4 performance counters together can be reset separately.
  17. */
  18. #ifdef CONFIG_CPU_V7
/*
 * Common ARMv7 event types
 *
 * Note: An implementation may not be able to count all of these events
 * but the encodings are considered to be `reserved' in the case that
 * they are not available.
 */
enum armv7_perf_types {
	ARMV7_PERFCTR_PMNC_SW_INCR = 0x00,
	ARMV7_PERFCTR_L1_ICACHE_REFILL = 0x01,
	ARMV7_PERFCTR_ITLB_REFILL = 0x02,
	ARMV7_PERFCTR_L1_DCACHE_REFILL = 0x03,
	ARMV7_PERFCTR_L1_DCACHE_ACCESS = 0x04,
	ARMV7_PERFCTR_DTLB_REFILL = 0x05,
	ARMV7_PERFCTR_MEM_READ = 0x06,
	ARMV7_PERFCTR_MEM_WRITE = 0x07,
	ARMV7_PERFCTR_INSTR_EXECUTED = 0x08,
	ARMV7_PERFCTR_EXC_TAKEN = 0x09,
	ARMV7_PERFCTR_EXC_EXECUTED = 0x0A,
	ARMV7_PERFCTR_CID_WRITE = 0x0B,

	/*
	 * ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
	 * It counts:
	 *  - all (taken) branch instructions,
	 *  - instructions that explicitly write the PC,
	 *  - exception generating instructions.
	 */
	ARMV7_PERFCTR_PC_WRITE = 0x0C,
	ARMV7_PERFCTR_PC_IMM_BRANCH = 0x0D,
	ARMV7_PERFCTR_PC_PROC_RETURN = 0x0E,
	ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS = 0x0F,
	ARMV7_PERFCTR_PC_BRANCH_MIS_PRED = 0x10,
	ARMV7_PERFCTR_CLOCK_CYCLES = 0x11,
	ARMV7_PERFCTR_PC_BRANCH_PRED = 0x12,

	/* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */
	ARMV7_PERFCTR_MEM_ACCESS = 0x13,
	ARMV7_PERFCTR_L1_ICACHE_ACCESS = 0x14,
	ARMV7_PERFCTR_L1_DCACHE_WB = 0x15,
	ARMV7_PERFCTR_L2_CACHE_ACCESS = 0x16,
	ARMV7_PERFCTR_L2_CACHE_REFILL = 0x17,
	ARMV7_PERFCTR_L2_CACHE_WB = 0x18,
	ARMV7_PERFCTR_BUS_ACCESS = 0x19,
	ARMV7_PERFCTR_MEM_ERROR = 0x1A,
	ARMV7_PERFCTR_INSTR_SPEC = 0x1B,
	ARMV7_PERFCTR_TTBR_WRITE = 0x1C,
	ARMV7_PERFCTR_BUS_CYCLES = 0x1D,

	/*
	 * Used by the event maps below to map PERF_COUNT_HW_CPU_CYCLES
	 * onto the dedicated cycle counter rather than a configurable
	 * event counter.
	 */
	ARMV7_PERFCTR_CPU_CYCLES = 0xFF
};
/* ARMv7 Cortex-A8 specific event types */
enum armv7_a8_perf_types {
	ARMV7_A8_PERFCTR_L2_CACHE_ACCESS = 0x43,
	ARMV7_A8_PERFCTR_L2_CACHE_REFILL = 0x44,
	ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS = 0x50,
	/* Mapped to PERF_COUNT_HW_STALLED_CYCLES_FRONTEND below. */
	ARMV7_A8_PERFCTR_STALL_ISIDE = 0x56,
};
/* ARMv7 Cortex-A9 specific event types */
enum armv7_a9_perf_types {
	/* Used for PERF_COUNT_HW_INSTRUCTIONS in the A9 event map below. */
	ARMV7_A9_PERFCTR_INSTR_CORE_RENAME = 0x68,
	/* Mapped to the FRONTEND/BACKEND stalled-cycles events below. */
	ARMV7_A9_PERFCTR_STALL_ICACHE = 0x60,
	ARMV7_A9_PERFCTR_STALL_DISPATCH = 0x66,
};
/* ARMv7 Cortex-A5 specific event types */
enum armv7_a5_perf_types {
	/* Used for the C(OP_PREFETCH) entries in the A5 cache map below. */
	ARMV7_A5_PERFCTR_PREFETCH_LINEFILL = 0xc2,
	ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP = 0xc3,
};
/*
 * ARMv7 Cortex-A15 specific event types
 *
 * Unlike the common encodings, the A15 provides separate read/write
 * variants for L1D/L2 accesses and refills and for L1 DTLB refills;
 * the A15 cache map below uses them to distinguish OP_READ from
 * OP_WRITE.
 */
enum armv7_a15_perf_types {
	ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ = 0x40,
	ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE = 0x41,
	ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ = 0x42,
	ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE = 0x43,
	ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ = 0x4C,
	ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE = 0x4D,
	ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ = 0x50,
	ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE = 0x51,
	ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ = 0x52,
	ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE = 0x53,
	/* Speculative PC writes; used for HW_BRANCH_INSTRUCTIONS on A15. */
	ARMV7_A15_PERFCTR_PC_WRITE_SPEC = 0x76,
};
/*
 * Cortex-A8 HW events mapping
 *
 * The hardware events that we support. We do support cache operations but
 * we have harvard caches and no way to combine instruction and data
 * accesses/misses in hardware.
 *
 * Entries set to HW_OP_UNSUPPORTED have no usable A8 encoding.
 */
static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A8_PERFCTR_STALL_ISIDE,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
};
/*
 * Cortex-A8: generic perf cache events -> PMU event encodings.
 * Combinations the A8 cannot count are CACHE_OP_UNSUPPORTED.
 */
static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		/*
		 * The performance counters don't differentiate between read
		 * and write accesses/misses so this isn't strictly correct,
		 * but it's the best we can do. Writes and reads get
		 * combined.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		/* A8 uses implementation-specific L2 event encodings. */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		/* Only refills are countable; accesses are not. */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
};
/*
 * Cortex-A9 HW events mapping
 *
 * Note: HW_INSTRUCTIONS uses the A9-specific "instructions through
 * core rename" event rather than the common INSTR_EXECUTED encoding,
 * and both stalled-cycles events are backed by A9-specific counters.
 */
static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_A9_PERFCTR_INSTR_CORE_RENAME,
	[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A9_PERFCTR_STALL_ICACHE,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV7_A9_PERFCTR_STALL_DISPATCH,
};
/*
 * Cortex-A9: generic perf cache events -> PMU event encodings.
 * L2 (LL) events are not countable on the A9, so that whole row is
 * CACHE_OP_UNSUPPORTED.
 */
static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		/*
		 * The performance counters don't differentiate between read
		 * and write accesses/misses so this isn't strictly correct,
		 * but it's the best we can do. Writes and reads get
		 * combined.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		/* Only I-cache refills are countable, not accesses. */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
};
/*
 * Cortex-A5 HW events mapping
 *
 * The A5 has no usable bus-cycle or stalled-cycle encodings, so those
 * entries are HW_OP_UNSUPPORTED.
 */
static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
};
/*
 * Cortex-A5: generic perf cache events -> PMU event encodings.
 * The A5 is the only CPU here with prefetch events; they are the
 * implementation-specific linefill/linefill-drop encodings.
 */
static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
			[C(RESULT_MISS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		/*
		 * The prefetch counters don't differentiate between the I
		 * side and the D side.
		 */
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
			[C(RESULT_MISS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
};
/*
 * Cortex-A15 HW events mapping
 *
 * Branch instructions use the speculative PC-write event and bus
 * cycles are countable (PMUv2); stalled-cycles events are not.
 */
static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_A15_PERFCTR_PC_WRITE_SPEC,
	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
};
/*
 * Cortex-A15: generic perf cache events -> PMU event encodings.
 * The A15 provides distinct read/write encodings for L1D, L2 and
 * the L1 DTLB, so OP_READ and OP_WRITE map to different events.
 */
static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ,
			[C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE,
			[C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		/*
		 * Not all performance counters differentiate between read
		 * and write accesses/misses so we're not always strictly
		 * correct, but it's the best we can do. Writes and reads get
		 * combined in these cases.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ,
			[C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE,
			[C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
};
/*
 * Cortex-A7 HW events mapping
 *
 * Uses the common ARMv7 encodings throughout; bus cycles are
 * countable (PMUv2) but stalled-cycles events are not.
 */
static const unsigned armv7_a7_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
};
/*
 * Cortex-A7: generic perf cache events -> PMU event encodings.
 * L2 (LL) events use the common PMUv2 L2 encodings; reads and writes
 * are combined where the hardware cannot distinguish them.
 */
static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		/*
		 * The performance counters don't differentiate between read
		 * and write accesses/misses so this isn't strictly correct,
		 * but it's the best we can do. Writes and reads get
		 * combined.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_CACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_CACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
};
  711. /*
  712. * Perf Events' indices
  713. */
  714. #define ARMV7_IDX_CYCLE_COUNTER 0
  715. #define ARMV7_IDX_COUNTER0 1
  716. #define ARMV7_IDX_COUNTER_LAST(cpu_pmu) \
  717. (ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
  718. #define ARMV7_MAX_COUNTERS 32
  719. #define ARMV7_COUNTER_MASK (ARMV7_MAX_COUNTERS - 1)
  720. /*
  721. * ARMv7 low level PMNC access
  722. */
  723. /*
  724. * Perf Event to low level counters mapping
  725. */
  726. #define ARMV7_IDX_TO_COUNTER(x) \
  727. (((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK)
  728. /*
  729. * Per-CPU PMNC: config reg
  730. */
  731. #define ARMV7_PMNC_E (1 << 0) /* Enable all counters */
  732. #define ARMV7_PMNC_P (1 << 1) /* Reset all counters */
  733. #define ARMV7_PMNC_C (1 << 2) /* Cycle counter reset */
  734. #define ARMV7_PMNC_D (1 << 3) /* CCNT counts every 64th cpu cycle */
  735. #define ARMV7_PMNC_X (1 << 4) /* Export to ETM */
  736. #define ARMV7_PMNC_DP (1 << 5) /* Disable CCNT if non-invasive debug*/
  737. #define ARMV7_PMNC_N_SHIFT 11 /* Number of counters supported */
  738. #define ARMV7_PMNC_N_MASK 0x1f
  739. #define ARMV7_PMNC_MASK 0x3f /* Mask for writable bits */
  740. /*
  741. * FLAG: counters overflow flag status reg
  742. */
  743. #define ARMV7_FLAG_MASK 0xffffffff /* Mask for writable bits */
  744. #define ARMV7_OVERFLOWED_MASK ARMV7_FLAG_MASK
  745. /*
  746. * PMXEVTYPER: Event selection reg
  747. */
  748. #define ARMV7_EVTYPE_MASK 0xc80000ff /* Mask for writable bits */
  749. #define ARMV7_EVTYPE_EVENT 0xff /* Mask for EVENT bits */
  750. /*
  751. * Event filters for PMUv2
  752. */
  753. #define ARMV7_EXCLUDE_PL1 (1 << 31)
  754. #define ARMV7_EXCLUDE_USER (1 << 30)
  755. #define ARMV7_INCLUDE_HYP (1 << 27)
/*
 * Read the PMNC (performance monitor control) register,
 * CP15 c9, c12, 0.
 */
static inline u32 armv7_pmnc_read(void)
{
	u32 val;
	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
	return val;
}
/*
 * Write the PMNC (performance monitor control) register,
 * CP15 c9, c12, 0.  Only the writable bits (ARMV7_PMNC_MASK) are
 * kept; the isb() synchronizes the pipeline before the control
 * register update changes counter behaviour.
 */
static inline void armv7_pmnc_write(u32 val)
{
	val &= ARMV7_PMNC_MASK;
	isb();
	asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
}
  768. static inline int armv7_pmnc_has_overflowed(u32 pmnc)
  769. {
  770. return pmnc & ARMV7_OVERFLOWED_MASK;
  771. }
  772. static inline int armv7_pmnc_counter_valid(struct arm_pmu *cpu_pmu, int idx)
  773. {
  774. return idx >= ARMV7_IDX_CYCLE_COUNTER &&
  775. idx <= ARMV7_IDX_COUNTER_LAST(cpu_pmu);
  776. }
  777. static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx)
  778. {
  779. return pmnc & BIT(ARMV7_IDX_TO_COUNTER(idx));
  780. }
/*
 * Select event counter @idx (via the PMSELR coprocessor register) so
 * that subsequent PMXEVTYPER/PMXEVCNTR accesses target it. The isb()
 * makes the selection visible before the next counter access. Returns
 * @idx so callers can write "if (select_counter(idx) == idx)".
 */
static inline int armv7_pmnc_select_counter(int idx)
{
u32 counter = ARMV7_IDX_TO_COUNTER(idx);
asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter));
isb();
return idx;
}
/*
 * Read the current value of @event's hardware counter.
 *
 * The cycle counter has a dedicated register (PMCCNTR); the programmable
 * counters are read through the selected-counter window (PMXEVCNTR)
 * after selecting the counter. Returns 0 (after logging an error) if the
 * event's index does not name a valid counter.
 */
static inline u32 armv7pmu_read_counter(struct perf_event *event)
{
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
u32 value = 0;
if (!armv7_pmnc_counter_valid(cpu_pmu, idx))
pr_err("CPU%u reading wrong counter %d\n",
smp_processor_id(), idx);
else if (idx == ARMV7_IDX_CYCLE_COUNTER)
asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
else if (armv7_pmnc_select_counter(idx) == idx)
asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (value));
return value;
}
/*
 * Write @value into @event's hardware counter.
 *
 * Mirror image of armv7pmu_read_counter(): the cycle counter is written
 * directly (PMCCNTR), programmable counters through the selected-counter
 * window (PMXEVCNTR). An invalid index is logged and the write dropped.
 */
static inline void armv7pmu_write_counter(struct perf_event *event, u32 value)
{
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
if (!armv7_pmnc_counter_valid(cpu_pmu, idx))
pr_err("CPU%u writing wrong counter %d\n",
smp_processor_id(), idx);
else if (idx == ARMV7_IDX_CYCLE_COUNTER)
asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
else if (armv7_pmnc_select_counter(idx) == idx)
asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value));
}
/*
 * Program the event type register (PMXEVTYPER) for counter @idx.
 * The value is masked to the architecturally writable bits, which cover
 * the event number and the PMUv2 privilege filter bits.
 */
static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
{
if (armv7_pmnc_select_counter(idx) == idx) {
val &= ARMV7_EVTYPE_MASK;
asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
}
}
/* Enable counter @idx by setting its bit in PMCNTENSET. Returns @idx. */
static inline int armv7_pmnc_enable_counter(int idx)
{
u32 counter = ARMV7_IDX_TO_COUNTER(idx);
asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter)));
return idx;
}
/* Disable counter @idx by setting its bit in PMCNTENCLR. Returns @idx. */
static inline int armv7_pmnc_disable_counter(int idx)
{
u32 counter = ARMV7_IDX_TO_COUNTER(idx);
asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter)));
return idx;
}
/*
 * Enable the overflow interrupt for counter @idx by setting its bit in
 * PMINTENSET. Returns @idx.
 */
static inline int armv7_pmnc_enable_intens(int idx)
{
u32 counter = ARMV7_IDX_TO_COUNTER(idx);
asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter)));
return idx;
}
/*
 * Disable the overflow interrupt for counter @idx (PMINTENCLR), then
 * clear any stale overflow flag for it so a pending interrupt cannot
 * fire after the counter is released. The isb()s order the disable
 * before the flag clear, and the flag clear before anything after.
 */
static inline int armv7_pmnc_disable_intens(int idx)
{
u32 counter = ARMV7_IDX_TO_COUNTER(idx);
asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
isb();
/* Clear the overflow flag in case an interrupt is pending. */
asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter)));
isb();
return idx;
}
/*
 * Atomically snapshot and clear the counter overflow flags: read the
 * overflow flag status register, then write the observed bits back to
 * acknowledge them. Returns the snapshot that was read.
 */
static inline u32 armv7_pmnc_getreset_flags(void)
{
u32 val;
/* Read */
asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
/* Write to clear flags */
val &= ARMV7_FLAG_MASK;
asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));
return val;
}
#ifdef DEBUG
/*
 * Debug-build helper: dump the PMU control and status registers, the
 * cycle counter, and the value/event-select pair of every programmable
 * counter to the kernel log.
 */
static void armv7_pmnc_dump_regs(struct arm_pmu *cpu_pmu)
{
u32 val;
unsigned int cnt;
printk(KERN_INFO "PMNC registers dump:\n");
asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
printk(KERN_INFO "PMNC =0x%08x\n", val);
asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
printk(KERN_INFO "CNTENS=0x%08x\n", val);
asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
printk(KERN_INFO "INTENS=0x%08x\n", val);
asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
printk(KERN_INFO "FLAGS =0x%08x\n", val);
asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
printk(KERN_INFO "SELECT=0x%08x\n", val);
asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
printk(KERN_INFO "CCNT =0x%08x\n", val);
/* Walk the programmable counters through the select/read window. */
for (cnt = ARMV7_IDX_COUNTER0;
cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) {
armv7_pmnc_select_counter(cnt);
asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
printk(KERN_INFO "CNT[%d] count =0x%08x\n",
ARMV7_IDX_TO_COUNTER(cnt), val);
asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n",
ARMV7_IDX_TO_COUNTER(cnt), val);
}
}
#endif
/*
 * Enable @event's hardware counter: program its event type (where
 * applicable), unmask its overflow interrupt and turn the counter on.
 * The whole sequence runs under pmu_lock with the counter disabled
 * first, so the event type cannot change while the counter is counting.
 */
static void armv7pmu_enable_event(struct perf_event *event)
{
unsigned long flags;
struct hw_perf_event *hwc = &event->hw;
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct pmu_hw_events *events = cpu_pmu->get_hw_events();
int idx = hwc->idx;
if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
smp_processor_id(), idx);
return;
}
/*
 * Enable counter and interrupt, and set the counter to count
 * the event that we're interested in.
 */
raw_spin_lock_irqsave(&events->pmu_lock, flags);
/*
 * Disable counter
 */
armv7_pmnc_disable_counter(idx);
/*
 * Set event (if destined for PMNx counters)
 * We only need to set the event for the cycle counter if we
 * have the ability to perform event filtering.
 */
if (cpu_pmu->set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
armv7_pmnc_write_evtsel(idx, hwc->config_base);
/*
 * Enable interrupt for this counter
 */
armv7_pmnc_enable_intens(idx);
/*
 * Enable counter
 */
armv7_pmnc_enable_counter(idx);
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
/*
 * Disable @event's hardware counter and mask its overflow interrupt,
 * under pmu_lock. Invalid indices are logged and ignored.
 */
static void armv7pmu_disable_event(struct perf_event *event)
{
unsigned long flags;
struct hw_perf_event *hwc = &event->hw;
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct pmu_hw_events *events = cpu_pmu->get_hw_events();
int idx = hwc->idx;
if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
smp_processor_id(), idx);
return;
}
/*
 * Disable counter and interrupt
 */
raw_spin_lock_irqsave(&events->pmu_lock, flags);
/*
 * Disable counter
 */
armv7_pmnc_disable_counter(idx);
/*
 * Disable interrupt for this counter
 */
armv7_pmnc_disable_intens(idx);
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
/*
 * PMU overflow interrupt handler.
 *
 * Snapshots and clears the overflow flags, then, for every active event
 * whose counter overflowed, updates the event count, restarts the
 * sampling period and feeds the overflow into the perf core (which may
 * ask us to disable the event, e.g. when throttled). Returns IRQ_NONE
 * if no counter had overflowed, so a shared interrupt line still works.
 */
static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
{
u32 pmnc;
struct perf_sample_data data;
struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events();
struct pt_regs *regs;
int idx;
/*
 * Get and reset the IRQ flags
 */
pmnc = armv7_pmnc_getreset_flags();
/*
 * Did an overflow occur?
 */
if (!armv7_pmnc_has_overflowed(pmnc))
return IRQ_NONE;
/*
 * Handle the counter(s) overflow(s)
 */
regs = get_irq_regs();
for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
/* Ignore if we don't have an event. */
if (!event)
continue;
/*
 * We have a single interrupt for all counters. Check that
 * each counter has overflowed before we process it.
 */
if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
continue;
hwc = &event->hw;
armpmu_event_update(event);
perf_sample_data_init(&data, 0, hwc->last_period);
/* Skip events whose period could not be restarted. */
if (!armpmu_event_set_period(event))
continue;
if (perf_event_overflow(event, &data, regs))
cpu_pmu->disable(event);
}
/*
 * Handle the pending perf events.
 *
 * Note: this call *must* be run with interrupts disabled. For
 * platforms that can have the PMU interrupts raised as an NMI, this
 * will not work.
 */
irq_work_run();
return IRQ_HANDLED;
}
/* Start the PMU: set the global enable bit (PMNC.E) under pmu_lock. */
static void armv7pmu_start(struct arm_pmu *cpu_pmu)
{
unsigned long flags;
struct pmu_hw_events *events = cpu_pmu->get_hw_events();
raw_spin_lock_irqsave(&events->pmu_lock, flags);
/* Enable all counters */
armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
/* Stop the PMU: clear the global enable bit (PMNC.E) under pmu_lock. */
static void armv7pmu_stop(struct arm_pmu *cpu_pmu)
{
unsigned long flags;
struct pmu_hw_events *events = cpu_pmu->get_hw_events();
raw_spin_lock_irqsave(&events->pmu_lock, flags);
/* Disable all counters */
armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
  1024. static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
  1025. struct perf_event *event)
  1026. {
  1027. int idx;
  1028. struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
  1029. struct hw_perf_event *hwc = &event->hw;
  1030. unsigned long evtype = hwc->config_base & ARMV7_EVTYPE_EVENT;
  1031. /* Always place a cycle counter into the cycle counter. */
  1032. if (evtype == ARMV7_PERFCTR_CPU_CYCLES) {
  1033. if (test_and_set_bit(ARMV7_IDX_CYCLE_COUNTER, cpuc->used_mask))
  1034. return -EAGAIN;
  1035. return ARMV7_IDX_CYCLE_COUNTER;
  1036. }
  1037. /*
  1038. * For anything other than a cycle counter, try and use
  1039. * the events counters
  1040. */
  1041. for (idx = ARMV7_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
  1042. if (!test_and_set_bit(idx, cpuc->used_mask))
  1043. return idx;
  1044. }
  1045. /* The counters are all in use. */
  1046. return -EAGAIN;
  1047. }
  1048. /*
  1049. * Add an event filter to a given event. This will only work for PMUv2 PMUs.
  1050. */
  1051. static int armv7pmu_set_event_filter(struct hw_perf_event *event,
  1052. struct perf_event_attr *attr)
  1053. {
  1054. unsigned long config_base = 0;
  1055. if (attr->exclude_idle)
  1056. return -EPERM;
  1057. if (attr->exclude_user)
  1058. config_base |= ARMV7_EXCLUDE_USER;
  1059. if (attr->exclude_kernel)
  1060. config_base |= ARMV7_EXCLUDE_PL1;
  1061. if (!attr->exclude_hv)
  1062. config_base |= ARMV7_INCLUDE_HYP;
  1063. /*
  1064. * Install the filter into config_base as this is used to
  1065. * construct the event type.
  1066. */
  1067. event->config_base = config_base;
  1068. return 0;
  1069. }
/*
 * Per-CPU reset callback (@info is the struct arm_pmu *).
 *
 * The counter and interrupt enable state is UNKNOWN out of hardware
 * reset, so explicitly disable every counter and its interrupt. Note
 * that idx 0 (the cycle counter) maps to hardware bit 31 through
 * ARMV7_IDX_TO_COUNTER, which is the cycle counter's position in the
 * enable/flag registers. Finally reset all counter values via the
 * PMNC P (event counters) and C (cycle counter) bits.
 */
static void armv7pmu_reset(void *info)
{
struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
u32 idx, nb_cnt = cpu_pmu->num_events;
/* The counter and interrupt enable registers are unknown at reset. */
for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
armv7_pmnc_disable_counter(idx);
armv7_pmnc_disable_intens(idx);
}
/* Initialize & Reset PMNC: C and P bits */
armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
}
/* Map generic perf event config onto Cortex-A8 event numbers. */
static int armv7_a8_map_event(struct perf_event *event)
{
return armpmu_map_event(event, &armv7_a8_perf_map,
&armv7_a8_perf_cache_map, 0xFF);
}
/* Map generic perf event config onto Cortex-A9 event numbers. */
static int armv7_a9_map_event(struct perf_event *event)
{
return armpmu_map_event(event, &armv7_a9_perf_map,
&armv7_a9_perf_cache_map, 0xFF);
}
/* Map generic perf event config onto Cortex-A5 event numbers. */
static int armv7_a5_map_event(struct perf_event *event)
{
return armpmu_map_event(event, &armv7_a5_perf_map,
&armv7_a5_perf_cache_map, 0xFF);
}
/* Map generic perf event config onto Cortex-A15 event numbers. */
static int armv7_a15_map_event(struct perf_event *event)
{
return armpmu_map_event(event, &armv7_a15_perf_map,
&armv7_a15_perf_cache_map, 0xFF);
}
/* Map generic perf event config onto Cortex-A7 event numbers. */
static int armv7_a7_map_event(struct perf_event *event)
{
return armpmu_map_event(event, &armv7_a7_perf_map,
&armv7_a7_perf_cache_map, 0xFF);
}
  1107. static void armv7pmu_init(struct arm_pmu *cpu_pmu)
  1108. {
  1109. cpu_pmu->handle_irq = armv7pmu_handle_irq;
  1110. cpu_pmu->enable = armv7pmu_enable_event;
  1111. cpu_pmu->disable = armv7pmu_disable_event;
  1112. cpu_pmu->read_counter = armv7pmu_read_counter;
  1113. cpu_pmu->write_counter = armv7pmu_write_counter;
  1114. cpu_pmu->get_event_idx = armv7pmu_get_event_idx;
  1115. cpu_pmu->start = armv7pmu_start;
  1116. cpu_pmu->stop = armv7pmu_stop;
  1117. cpu_pmu->reset = armv7pmu_reset;
  1118. cpu_pmu->max_period = (1LLU << 32) - 1;
  1119. };
  1120. static u32 armv7_read_num_pmnc_events(void)
  1121. {
  1122. u32 nb_cnt;
  1123. /* Read the nb of CNTx counters supported from PMNC */
  1124. nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;
  1125. /* Add the CPU cycles counter and return */
  1126. return nb_cnt + 1;
  1127. }
/* Cortex-A8: common ARMv7 setup plus the A8 event map. */
static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
{
armv7pmu_init(cpu_pmu);
cpu_pmu->name = "ARMv7 Cortex-A8";
cpu_pmu->map_event = armv7_a8_map_event;
cpu_pmu->num_events = armv7_read_num_pmnc_events();
return 0;
}
/* Cortex-A9: common ARMv7 setup plus the A9 event map. */
static int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
{
armv7pmu_init(cpu_pmu);
cpu_pmu->name = "ARMv7 Cortex-A9";
cpu_pmu->map_event = armv7_a9_map_event;
cpu_pmu->num_events = armv7_read_num_pmnc_events();
return 0;
}
/* Cortex-A5: common ARMv7 setup plus the A5 event map. */
static int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
{
armv7pmu_init(cpu_pmu);
cpu_pmu->name = "ARMv7 Cortex-A5";
cpu_pmu->map_event = armv7_a5_map_event;
cpu_pmu->num_events = armv7_read_num_pmnc_events();
return 0;
}
/* Cortex-A15: PMUv2, so it additionally supports event filtering. */
static int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
{
armv7pmu_init(cpu_pmu);
cpu_pmu->name = "ARMv7 Cortex-A15";
cpu_pmu->map_event = armv7_a15_map_event;
cpu_pmu->num_events = armv7_read_num_pmnc_events();
cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
return 0;
}
/* Cortex-A7: PMUv2, so it additionally supports event filtering. */
static int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
{
armv7pmu_init(cpu_pmu);
cpu_pmu->name = "ARMv7 Cortex-A7";
cpu_pmu->map_event = armv7_a7_map_event;
cpu_pmu->num_events = armv7_read_num_pmnc_events();
cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
return 0;
}
  1170. #else
/*
 * !CONFIG_CPU_V7 stubs: keep callers linking while reporting that no
 * ARMv7 PMU support is built in.
 */
static inline int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
{
return -ENODEV;
}
static inline int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
{
return -ENODEV;
}
static inline int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
{
return -ENODEV;
}
static inline int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
{
return -ENODEV;
}
static inline int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
{
return -ENODEV;
}
  1191. #endif /* CONFIG_CPU_V7 */