/*
 * Linux performance counter support for ARC
 *
 * Copyright (C) 2011-2013 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
  11. #ifndef __ASM_PERF_EVENT_H
  12. #define __ASM_PERF_EVENT_H
  13. /* real maximum varies per CPU, this is the maximum supported by the driver */
  14. #define ARC_PMU_MAX_HWEVENTS 64
  15. #define ARC_REG_CC_BUILD 0xF6
  16. #define ARC_REG_CC_INDEX 0x240
  17. #define ARC_REG_CC_NAME0 0x241
  18. #define ARC_REG_CC_NAME1 0x242
  19. #define ARC_REG_PCT_BUILD 0xF5
  20. #define ARC_REG_PCT_COUNTL 0x250
  21. #define ARC_REG_PCT_COUNTH 0x251
  22. #define ARC_REG_PCT_SNAPL 0x252
  23. #define ARC_REG_PCT_SNAPH 0x253
  24. #define ARC_REG_PCT_CONFIG 0x254
  25. #define ARC_REG_PCT_CONTROL 0x255
  26. #define ARC_REG_PCT_INDEX 0x256
  27. #define ARC_REG_PCT_CONTROL_CC (1 << 16) /* clear counts */
  28. #define ARC_REG_PCT_CONTROL_SN (1 << 17) /* snapshot */
  29. struct arc_reg_pct_build {
  30. #ifdef CONFIG_CPU_BIG_ENDIAN
  31. unsigned int m:8, c:8, r:6, s:2, v:8;
  32. #else
  33. unsigned int v:8, s:2, r:6, c:8, m:8;
  34. #endif
  35. };
  36. struct arc_reg_cc_build {
  37. #ifdef CONFIG_CPU_BIG_ENDIAN
  38. unsigned int c:16, r:8, v:8;
  39. #else
  40. unsigned int v:8, r:8, c:16;
  41. #endif
  42. };
  43. #define PERF_COUNT_ARC_DCLM (PERF_COUNT_HW_MAX + 0)
  44. #define PERF_COUNT_ARC_DCSM (PERF_COUNT_HW_MAX + 1)
  45. #define PERF_COUNT_ARC_ICM (PERF_COUNT_HW_MAX + 2)
  46. #define PERF_COUNT_ARC_BPOK (PERF_COUNT_HW_MAX + 3)
  47. #define PERF_COUNT_ARC_EDTLB (PERF_COUNT_HW_MAX + 4)
  48. #define PERF_COUNT_ARC_EITLB (PERF_COUNT_HW_MAX + 5)
  49. #define PERF_COUNT_ARC_HW_MAX (PERF_COUNT_HW_MAX + 6)
/*
 * The "generalized" performance events seem to really be a copy
 * of the available events on x86 processors; the mapping to ARC
 * events is not always possible 1-to-1. Fortunately, there doesn't
 * seem to be an exact definition for these events, so we can cheat
 * a bit where necessary.
 *
 * In particular, the following PERF events may behave a bit differently
 * compared to other architectures:
 *
 * PERF_COUNT_HW_CPU_CYCLES
 *	Cycles not in halted state
 *
 * PERF_COUNT_HW_REF_CPU_CYCLES
 *	Reference cycles not in halted state, same as PERF_COUNT_HW_CPU_CYCLES
 *	for now as we don't do Dynamic Voltage/Frequency Scaling (yet)
 *
 * PERF_COUNT_HW_BUS_CYCLES
 *	Unclear what this means, Intel uses 0x013c, which according to
 *	their datasheet means "unhalted reference cycles". It sounds similar
 *	to PERF_COUNT_HW_REF_CPU_CYCLES, and we use the same counter for it.
 *
 * PERF_COUNT_HW_STALLED_CYCLES_BACKEND
 * PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
 *	The ARC 700 can either measure stalls per pipeline stage, or all stalls
 *	combined; for now we assign all stalls to STALLED_CYCLES_BACKEND
 *	and all pipeline flushes (e.g. caused by mispredicts, etc.) to
 *	STALLED_CYCLES_FRONTEND.
 *
 * We could start multiple performance counters and combine everything
 * afterwards, but that makes it complicated.
 *
 * Note that I$ cache misses aren't counted by either of the two!
 */
  84. static const char * const arc_pmu_ev_hw_map[] = {
  85. [PERF_COUNT_HW_CPU_CYCLES] = "crun",
  86. [PERF_COUNT_HW_REF_CPU_CYCLES] = "crun",
  87. [PERF_COUNT_HW_BUS_CYCLES] = "crun",
  88. [PERF_COUNT_HW_INSTRUCTIONS] = "iall",
  89. [PERF_COUNT_HW_BRANCH_MISSES] = "bpfail",
  90. [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmp",
  91. [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = "bflush",
  92. [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = "bstall",
  93. [PERF_COUNT_ARC_DCLM] = "dclm",
  94. [PERF_COUNT_ARC_DCSM] = "dcsm",
  95. [PERF_COUNT_ARC_ICM] = "icm",
  96. [PERF_COUNT_ARC_BPOK] = "bpok",
  97. [PERF_COUNT_ARC_EDTLB] = "edtlb",
  98. [PERF_COUNT_ARC_EITLB] = "eitlb",
  99. };
  100. #define C(_x) PERF_COUNT_HW_CACHE_##_x
  101. #define CACHE_OP_UNSUPPORTED 0xffff
  102. static const unsigned arc_pmu_cache_map[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
  103. [C(L1D)] = {
  104. [C(OP_READ)] = {
  105. [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
  106. [C(RESULT_MISS)] = PERF_COUNT_ARC_DCLM,
  107. },
  108. [C(OP_WRITE)] = {
  109. [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
  110. [C(RESULT_MISS)] = PERF_COUNT_ARC_DCSM,
  111. },
  112. [C(OP_PREFETCH)] = {
  113. [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
  114. [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
  115. },
  116. },
  117. [C(L1I)] = {
  118. [C(OP_READ)] = {
  119. [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
  120. [C(RESULT_MISS)] = PERF_COUNT_ARC_ICM,
  121. },
  122. [C(OP_WRITE)] = {
  123. [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
  124. [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
  125. },
  126. [C(OP_PREFETCH)] = {
  127. [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
  128. [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
  129. },
  130. },
  131. [C(LL)] = {
  132. [C(OP_READ)] = {
  133. [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
  134. [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
  135. },
  136. [C(OP_WRITE)] = {
  137. [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
  138. [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
  139. },
  140. [C(OP_PREFETCH)] = {
  141. [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
  142. [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
  143. },
  144. },
  145. [C(DTLB)] = {
  146. [C(OP_READ)] = {
  147. [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
  148. [C(RESULT_MISS)] = PERF_COUNT_ARC_EDTLB,
  149. },
  150. [C(OP_WRITE)] = {
  151. [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
  152. [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
  153. },
  154. [C(OP_PREFETCH)] = {
  155. [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
  156. [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
  157. },
  158. },
  159. [C(ITLB)] = {
  160. [C(OP_READ)] = {
  161. [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
  162. [C(RESULT_MISS)] = PERF_COUNT_ARC_EITLB,
  163. },
  164. [C(OP_WRITE)] = {
  165. [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
  166. [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
  167. },
  168. [C(OP_PREFETCH)] = {
  169. [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
  170. [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
  171. },
  172. },
  173. [C(BPU)] = {
  174. [C(OP_READ)] = {
  175. [C(RESULT_ACCESS)] = PERF_COUNT_HW_BRANCH_INSTRUCTIONS,
  176. [C(RESULT_MISS)] = PERF_COUNT_HW_BRANCH_MISSES,
  177. },
  178. [C(OP_WRITE)] = {
  179. [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
  180. [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
  181. },
  182. [C(OP_PREFETCH)] = {
  183. [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
  184. [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
  185. },
  186. },
  187. [C(NODE)] = {
  188. [C(OP_READ)] = {
  189. [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
  190. [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
  191. },
  192. [C(OP_WRITE)] = {
  193. [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
  194. [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
  195. },
  196. [C(OP_PREFETCH)] = {
  197. [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
  198. [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
  199. },
  200. },
  201. };
  202. #endif /* __ASM_PERF_EVENT_H */