perf_event_v7.c

/*
 * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
 *
 * ARMv7 support: Jean Pihet <jpihet@mvista.com>
 * 2010 (c) MontaVista Software, LLC.
 *
 * Copied from ARMv6 code, with the low level code inspired
 * by the ARMv7 Oprofile code.
 *
 * Cortex-A8 has up to 4 configurable performance counters and
 * a single cycle counter.
 * Cortex-A9 has up to 31 configurable performance counters and
 * a single cycle counter.
 *
 * All counters can be enabled/disabled and IRQ masked separately. The cycle
 * counter and all 4 performance counters together can be reset separately.
 */

#ifdef CONFIG_CPU_V7

/*
 * Common ARMv7 event types
 *
 * Note: An implementation may not be able to count all of these events
 * but the encodings are considered to be `reserved' in the case that
 * they are not available.
 */
enum armv7_perf_types {
	ARMV7_PERFCTR_PMNC_SW_INCR = 0x00,
	ARMV7_PERFCTR_L1_ICACHE_REFILL = 0x01,
	ARMV7_PERFCTR_ITLB_REFILL = 0x02,
	ARMV7_PERFCTR_L1_DCACHE_REFILL = 0x03,
	ARMV7_PERFCTR_L1_DCACHE_ACCESS = 0x04,
	ARMV7_PERFCTR_DTLB_REFILL = 0x05,
	ARMV7_PERFCTR_MEM_READ = 0x06,
	ARMV7_PERFCTR_MEM_WRITE = 0x07,
	ARMV7_PERFCTR_INSTR_EXECUTED = 0x08,
	ARMV7_PERFCTR_EXC_TAKEN = 0x09,
	ARMV7_PERFCTR_EXC_EXECUTED = 0x0A,
	ARMV7_PERFCTR_CID_WRITE = 0x0B,

	/*
	 * ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
	 * It counts:
	 *  - all (taken) branch instructions,
	 *  - instructions that explicitly write the PC,
	 *  - exception generating instructions.
	 */
	ARMV7_PERFCTR_PC_WRITE = 0x0C,
	ARMV7_PERFCTR_PC_IMM_BRANCH = 0x0D,
	ARMV7_PERFCTR_PC_PROC_RETURN = 0x0E,
	ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS = 0x0F,
	ARMV7_PERFCTR_PC_BRANCH_MIS_PRED = 0x10,
	ARMV7_PERFCTR_CLOCK_CYCLES = 0x11,
	ARMV7_PERFCTR_PC_BRANCH_PRED = 0x12,

	/* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */
	ARMV7_PERFCTR_MEM_ACCESS = 0x13,
	ARMV7_PERFCTR_L1_ICACHE_ACCESS = 0x14,
	ARMV7_PERFCTR_L1_DCACHE_WB = 0x15,
	ARMV7_PERFCTR_L2_CACHE_ACCESS = 0x16,
	ARMV7_PERFCTR_L2_CACHE_REFILL = 0x17,
	ARMV7_PERFCTR_L2_CACHE_WB = 0x18,
	ARMV7_PERFCTR_BUS_ACCESS = 0x19,
	ARMV7_PERFCTR_MEM_ERROR = 0x1A,
	ARMV7_PERFCTR_INSTR_SPEC = 0x1B,
	ARMV7_PERFCTR_TTBR_WRITE = 0x1C,
	ARMV7_PERFCTR_BUS_CYCLES = 0x1D,

	ARMV7_PERFCTR_CPU_CYCLES = 0xFF
};

/* ARMv7 Cortex-A8 specific event types */
enum armv7_a8_perf_types {
	ARMV7_A8_PERFCTR_L2_CACHE_ACCESS = 0x43,
	ARMV7_A8_PERFCTR_L2_CACHE_REFILL = 0x44,
	ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS = 0x50,
	ARMV7_A8_PERFCTR_STALL_ISIDE = 0x56,
};

/* ARMv7 Cortex-A9 specific event types */
enum armv7_a9_perf_types {
	ARMV7_A9_PERFCTR_INSTR_CORE_RENAME = 0x68,
	ARMV7_A9_PERFCTR_STALL_ICACHE = 0x60,
	ARMV7_A9_PERFCTR_STALL_DISPATCH = 0x66,
};

/* ARMv7 Cortex-A5 specific event types */
enum armv7_a5_perf_types {
	ARMV7_A5_PERFCTR_PREFETCH_LINEFILL = 0xc2,
	ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP = 0xc3,
};

/* ARMv7 Cortex-A15 specific event types */
enum armv7_a15_perf_types {
	ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ = 0x40,
	ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE = 0x41,
	ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ = 0x42,
	ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE = 0x43,
	ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ = 0x4C,
	ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE = 0x4D,
	ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ = 0x50,
	ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE = 0x51,
	ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ = 0x52,
	ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE = 0x53,
	ARMV7_A15_PERFCTR_PC_WRITE_SPEC = 0x76,
};

/*
 * Cortex-A8 HW events mapping
 *
 * The hardware events that we support. We do support cache operations but
 * we have harvard caches and no way to combine instruction and data
 * accesses/misses in hardware.
 */
static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A8_PERFCTR_STALL_ISIDE,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
};
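
/*
 * Illustration (not part of the original file): a userspace request such as
 * "perf stat -e cache-misses" arrives here as PERF_COUNT_HW_CACHE_MISSES and,
 * on Cortex-A8, is translated by the table above into the architected event
 * number ARMV7_PERFCTR_L1_DCACHE_REFILL (0x03), which is later programmed
 * into PMXEVTYPER by armv7_pmnc_write_evtsel(). Roughly:
 *
 *	unsigned config = armv7_a8_perf_map[PERF_COUNT_HW_CACHE_MISSES];
 *	// config == ARMV7_PERFCTR_L1_DCACHE_REFILL (0x03)
 *
 * Entries set to HW_OP_UNSUPPORTED make armpmu_map_event() reject the
 * request instead.
 */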
static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		/*
		 * The performance counters don't differentiate between read
		 * and write accesses/misses so this isn't strictly correct,
		 * but it's the best we can do. Writes and reads get
		 * combined.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
};

/*
 * Cortex-A9 HW events mapping
 */
static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_A9_PERFCTR_INSTR_CORE_RENAME,
	[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A9_PERFCTR_STALL_ICACHE,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV7_A9_PERFCTR_STALL_DISPATCH,
};

static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		/*
		 * The performance counters don't differentiate between read
		 * and write accesses/misses so this isn't strictly correct,
		 * but it's the best we can do. Writes and reads get
		 * combined.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
};

/*
 * Cortex-A5 HW events mapping
 */
static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
};

static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
			[C(RESULT_MISS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
		},
		/*
		 * The prefetch counters don't differentiate between the I
		 * side and the D side.
		 */
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
			[C(RESULT_MISS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
};

/*
 * Cortex-A15 HW events mapping
 */
static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_A15_PERFCTR_PC_WRITE_SPEC,
	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
};

static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ,
			[C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE,
			[C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		/*
		 * Not all performance counters differentiate between read
		 * and write accesses/misses so we're not always strictly
		 * correct, but it's the best we can do. Writes and reads get
		 * combined in these cases.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ,
			[C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE,
			[C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
};

/*
 * Cortex-A7 HW events mapping
 */
static const unsigned armv7_a7_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
};

static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		/*
		 * The performance counters don't differentiate between read
		 * and write accesses/misses so this isn't strictly correct,
		 * but it's the best we can do. Writes and reads get
		 * combined.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_CACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_CACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
};

/*
 * Perf Events' indices
 */
#define ARMV7_IDX_CYCLE_COUNTER	0
#define ARMV7_IDX_COUNTER0	1
#define ARMV7_IDX_COUNTER_LAST	(ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)

#define ARMV7_MAX_COUNTERS	32
#define ARMV7_COUNTER_MASK	(ARMV7_MAX_COUNTERS - 1)

/*
 * ARMv7 low level PMNC access
 */

/*
 * Perf Event to low level counters mapping
 */
#define ARMV7_IDX_TO_COUNTER(x)	\
	(((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK)
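
/*
 * Illustration (not part of the original file): perf-side indices start at
 * ARMV7_IDX_CYCLE_COUNTER (0) for the cycle counter, with the programmable
 * counters at ARMV7_IDX_COUNTER0 (1) upwards. ARMV7_IDX_TO_COUNTER() rebases
 * those onto the hardware numbering used by the select/enable registers, so
 * for example:
 *
 *	ARMV7_IDX_TO_COUNTER(ARMV7_IDX_COUNTER0)     == 0
 *	ARMV7_IDX_TO_COUNTER(ARMV7_IDX_COUNTER0 + 3) == 3
 *
 * and BIT(ARMV7_IDX_TO_COUNTER(idx)) is the enable/overflow bit used for
 * that counter in the routines below.
 */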
/*
 * Per-CPU PMNC: config reg
 */
#define ARMV7_PMNC_E		(1 << 0) /* Enable all counters */
#define ARMV7_PMNC_P		(1 << 1) /* Reset all counters */
#define ARMV7_PMNC_C		(1 << 2) /* Cycle counter reset */
#define ARMV7_PMNC_D		(1 << 3) /* CCNT counts every 64th cpu cycle */
#define ARMV7_PMNC_X		(1 << 4) /* Export to ETM */
#define ARMV7_PMNC_DP		(1 << 5) /* Disable CCNT if non-invasive debug */
#define ARMV7_PMNC_N_SHIFT	11	 /* Number of counters supported */
#define ARMV7_PMNC_N_MASK	0x1f
#define ARMV7_PMNC_MASK		0x3f	 /* Mask for writable bits */

/*
 * FLAG: counters overflow flag status reg
 */
#define ARMV7_FLAG_MASK		0xffffffff	/* Mask for writable bits */
#define ARMV7_OVERFLOWED_MASK	ARMV7_FLAG_MASK

/*
 * PMXEVTYPER: Event selection reg
 */
#define ARMV7_EVTYPE_MASK	0xc00000ff	/* Mask for writable bits */
#define ARMV7_EVTYPE_EVENT	0xff		/* Mask for EVENT bits */

/*
 * Event filters for PMUv2
 */
#define ARMV7_EXCLUDE_PL1	(1 << 31)
#define ARMV7_EXCLUDE_USER	(1 << 30)
#define ARMV7_INCLUDE_HYP	(1 << 27)
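
/*
 * Illustration (not part of the original file): the value written to
 * PMXEVTYPER by armv7_pmnc_write_evtsel() combines the filter bits above
 * with an event number from the enums at the top of this file. For a PMUv2
 * core counting L1D refills in user space only, the composed value would be
 * roughly:
 *
 *	u32 evtsel = ARMV7_EXCLUDE_PL1 | ARMV7_PERFCTR_L1_DCACHE_REFILL;
 *	// evtsel == 0x80000003; (evtsel & ARMV7_EVTYPE_MASK) keeps only
 *	// the filter and EVENT fields before the write.
 *
 * armv7pmu_set_event_filter() below builds the filter part from the
 * perf_event_attr exclude_* flags.
 */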
static inline u32 armv7_pmnc_read(void)
{
	u32 val;
	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
	return val;
}

static inline void armv7_pmnc_write(u32 val)
{
	val &= ARMV7_PMNC_MASK;
	isb();
	asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
}

static inline int armv7_pmnc_has_overflowed(u32 pmnc)
{
	return pmnc & ARMV7_OVERFLOWED_MASK;
}

static inline int armv7_pmnc_counter_valid(int idx)
{
	return idx >= ARMV7_IDX_CYCLE_COUNTER && idx <= ARMV7_IDX_COUNTER_LAST;
}

static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx)
{
	int ret = 0;
	u32 counter;

	if (!armv7_pmnc_counter_valid(idx)) {
		pr_err("CPU%u checking wrong counter %d overflow status\n",
			smp_processor_id(), idx);
	} else {
		counter = ARMV7_IDX_TO_COUNTER(idx);
		ret = pmnc & BIT(counter);
	}

	return ret;
}

static inline int armv7_pmnc_select_counter(int idx)
{
	u32 counter;

	if (!armv7_pmnc_counter_valid(idx)) {
		pr_err("CPU%u selecting wrong PMNC counter %d\n",
			smp_processor_id(), idx);
		return -EINVAL;
	}

	counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter));
	isb();

	return idx;
}

static inline u32 armv7pmu_read_counter(int idx)
{
	u32 value = 0;

	if (!armv7_pmnc_counter_valid(idx))
		pr_err("CPU%u reading wrong counter %d\n",
			smp_processor_id(), idx);
	else if (idx == ARMV7_IDX_CYCLE_COUNTER)
		asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
	else if (armv7_pmnc_select_counter(idx) == idx)
		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (value));

	return value;
}

static inline void armv7pmu_write_counter(int idx, u32 value)
{
	if (!armv7_pmnc_counter_valid(idx))
		pr_err("CPU%u writing wrong counter %d\n",
			smp_processor_id(), idx);
	else if (idx == ARMV7_IDX_CYCLE_COUNTER)
		asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
	else if (armv7_pmnc_select_counter(idx) == idx)
		asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value));
}

static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
{
	if (armv7_pmnc_select_counter(idx) == idx) {
		val &= ARMV7_EVTYPE_MASK;
		asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
	}
}

static inline int armv7_pmnc_enable_counter(int idx)
{
	u32 counter;

	if (!armv7_pmnc_counter_valid(idx)) {
		pr_err("CPU%u enabling wrong PMNC counter %d\n",
			smp_processor_id(), idx);
		return -EINVAL;
	}

	counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter)));
	return idx;
}

static inline int armv7_pmnc_disable_counter(int idx)
{
	u32 counter;

	if (!armv7_pmnc_counter_valid(idx)) {
		pr_err("CPU%u disabling wrong PMNC counter %d\n",
			smp_processor_id(), idx);
		return -EINVAL;
	}

	counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter)));
	return idx;
}

static inline int armv7_pmnc_enable_intens(int idx)
{
	u32 counter;

	if (!armv7_pmnc_counter_valid(idx)) {
		pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
			smp_processor_id(), idx);
		return -EINVAL;
	}

	counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter)));
	return idx;
}

static inline int armv7_pmnc_disable_intens(int idx)
{
	u32 counter;

	if (!armv7_pmnc_counter_valid(idx)) {
		pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
			smp_processor_id(), idx);
		return -EINVAL;
	}

	counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
	isb();
	/* Clear the overflow flag in case an interrupt is pending. */
	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter)));
	isb();

	return idx;
}

static inline u32 armv7_pmnc_getreset_flags(void)
{
	u32 val;

	/* Read */
	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));

	/* Write to clear flags */
	val &= ARMV7_FLAG_MASK;
	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));

	return val;
}

#ifdef DEBUG
static void armv7_pmnc_dump_regs(void)
{
	u32 val;
	unsigned int cnt;

	printk(KERN_INFO "PMNC registers dump:\n");

	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
	printk(KERN_INFO "PMNC  =0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
	printk(KERN_INFO "CNTENS=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
	printk(KERN_INFO "INTENS=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
	printk(KERN_INFO "FLAGS =0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
	printk(KERN_INFO "SELECT=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
	printk(KERN_INFO "CCNT  =0x%08x\n", val);

	for (cnt = ARMV7_IDX_COUNTER0; cnt <= ARMV7_IDX_COUNTER_LAST; cnt++) {
		armv7_pmnc_select_counter(cnt);
		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
		printk(KERN_INFO "CNT[%d] count =0x%08x\n",
			ARMV7_IDX_TO_COUNTER(cnt), val);
		asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
		printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n",
			ARMV7_IDX_TO_COUNTER(cnt), val);
	}
}
#endif

static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
	unsigned long flags;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	/*
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Set event (if destined for PMNx counters)
	 * We only need to set the event for the cycle counter if we
	 * have the ability to perform event filtering.
	 */
	if (cpu_pmu->set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
		armv7_pmnc_write_evtsel(idx, hwc->config_base);

	/*
	 * Enable interrupt for this counter
	 */
	armv7_pmnc_enable_intens(idx);

	/*
	 * Enable counter
	 */
	armv7_pmnc_enable_counter(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
	unsigned long flags;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	/*
	 * Disable counter and interrupt
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Disable interrupt for this counter
	 */
	armv7_pmnc_disable_intens(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
{
	u32 pmnc;
	struct perf_sample_data data;
	struct pmu_hw_events *cpuc;
	struct pt_regs *regs;
	int idx;

	/*
	 * Get and reset the IRQ flags
	 */
	pmnc = armv7_pmnc_getreset_flags();

	/*
	 * Did an overflow occur?
	 */
	if (!armv7_pmnc_has_overflowed(pmnc))
		return IRQ_NONE;

	/*
	 * Handle the counter(s) overflow(s)
	 */
	regs = get_irq_regs();

	cpuc = &__get_cpu_var(cpu_hw_events);
	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		/* Ignore if we don't have an event. */
		if (!event)
			continue;

		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event, hwc, idx);
		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!armpmu_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(hwc, idx);
	}

	/*
	 * Handle the pending perf events.
	 *
	 * Note: this call *must* be run with interrupts disabled. For
	 * platforms that can have the PMU interrupts raised as an NMI, this
	 * will not work.
	 */
	irq_work_run();

	return IRQ_HANDLED;
}

static void armv7pmu_start(void)
{
	unsigned long flags;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/* Enable all counters */
	armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv7pmu_stop(void)
{
	unsigned long flags;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/* Disable all counters */
	armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
				  struct hw_perf_event *event)
{
	int idx;
	unsigned long evtype = event->config_base & ARMV7_EVTYPE_EVENT;

	/* Always place a cycle counter into the cycle counter. */
	if (evtype == ARMV7_PERFCTR_CPU_CYCLES) {
		if (test_and_set_bit(ARMV7_IDX_CYCLE_COUNTER, cpuc->used_mask))
			return -EAGAIN;

		return ARMV7_IDX_CYCLE_COUNTER;
	}

	/*
	 * For anything other than a cycle counter, try and use
	 * the events counters
	 */
	for (idx = ARMV7_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
		if (!test_and_set_bit(idx, cpuc->used_mask))
			return idx;
	}

	/* The counters are all in use. */
	return -EAGAIN;
}

/*
 * Add an event filter to a given event. This will only work for PMUv2 PMUs.
 */
static int armv7pmu_set_event_filter(struct hw_perf_event *event,
				     struct perf_event_attr *attr)
{
	unsigned long config_base = 0;

	if (attr->exclude_idle)
		return -EPERM;
	if (attr->exclude_user)
		config_base |= ARMV7_EXCLUDE_USER;
	if (attr->exclude_kernel)
		config_base |= ARMV7_EXCLUDE_PL1;
	if (!attr->exclude_hv)
		config_base |= ARMV7_INCLUDE_HYP;

	/*
	 * Install the filter into config_base as this is used to
	 * construct the event type.
	 */
	event->config_base = config_base;

	return 0;
}

static void armv7pmu_reset(void *info)
{
	u32 idx, nb_cnt = cpu_pmu->num_events;

	/* The counter and interrupt enable registers are unknown at reset. */
	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx)
		armv7pmu_disable_event(NULL, idx);

	/* Initialize & Reset PMNC: C and P bits */
	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
}

static int armv7_a8_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a8_perf_map,
				&armv7_a8_perf_cache_map, 0xFF);
}

static int armv7_a9_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a9_perf_map,
				&armv7_a9_perf_cache_map, 0xFF);
}

static int armv7_a5_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a5_perf_map,
				&armv7_a5_perf_cache_map, 0xFF);
}

static int armv7_a15_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a15_perf_map,
				&armv7_a15_perf_cache_map, 0xFF);
}

static int armv7_a7_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a7_perf_map,
				&armv7_a7_perf_cache_map, 0xFF);
}

static void armv7pmu_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->handle_irq = armv7pmu_handle_irq;
	cpu_pmu->enable = armv7pmu_enable_event;
	cpu_pmu->disable = armv7pmu_disable_event;
	cpu_pmu->read_counter = armv7pmu_read_counter;
	cpu_pmu->write_counter = armv7pmu_write_counter;
	cpu_pmu->get_event_idx = armv7pmu_get_event_idx;
	cpu_pmu->start = armv7pmu_start;
	cpu_pmu->stop = armv7pmu_stop;
	cpu_pmu->reset = armv7pmu_reset;
	cpu_pmu->max_period = (1LLU << 32) - 1;
}

static u32 __devinit armv7_read_num_pmnc_events(void)
{
	u32 nb_cnt;

	/* Read the nb of CNTx counters supported from PMNC */
	nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;

	/* Add the CPU cycles counter and return */
	return nb_cnt + 1;
}
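
/*
 * Illustration (not part of the original file): the N field of PMNC/PMCR
 * (bits [15:11]) reports how many event counters the implementation
 * provides, not counting the cycle counter. A Cortex-A9 typically reads
 * back N == 6, so this helper would return 7: six programmable counters
 * plus CCNT, which is the value assigned to cpu_pmu->num_events by the
 * init functions below.
 */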
static int __devinit armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name = "ARMv7 Cortex-A8";
	cpu_pmu->map_event = armv7_a8_map_event;
	cpu_pmu->num_events = armv7_read_num_pmnc_events();
	return 0;
}

static int __devinit armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name = "ARMv7 Cortex-A9";
	cpu_pmu->map_event = armv7_a9_map_event;
	cpu_pmu->num_events = armv7_read_num_pmnc_events();
	return 0;
}

static int __devinit armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name = "ARMv7 Cortex-A5";
	cpu_pmu->map_event = armv7_a5_map_event;
	cpu_pmu->num_events = armv7_read_num_pmnc_events();
	return 0;
}

static int __devinit armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name = "ARMv7 Cortex-A15";
	cpu_pmu->map_event = armv7_a15_map_event;
	cpu_pmu->num_events = armv7_read_num_pmnc_events();
	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
	return 0;
}

static int __devinit armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name = "ARMv7 Cortex-A7";
	cpu_pmu->map_event = armv7_a7_map_event;
	cpu_pmu->num_events = armv7_read_num_pmnc_events();
	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
	return 0;
}
#else
static inline int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}

static inline int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}

static inline int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}

static inline int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}

static inline int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}
#endif	/* CONFIG_CPU_V7 */