perf_event.c

/*
 * Performance events support for SH-4A performance counters
 *
 * Copyright (C) 2009, 2010  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/perf_event.h>
#include <asm/processor.h>

#define PPC_CCBR(idx)	(0xff200800 + (sizeof(u32) * idx))
#define PPC_PMCTR(idx)	(0xfc100000 + (sizeof(u32) * idx))

#define CCBR_CIT_MASK	(0x7ff << 6)
#define CCBR_DUC	(1 << 3)
#define CCBR_CMDS	(1 << 1)
#define CCBR_PPCE	(1 << 0)
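
/*
 * The layout implied by the code below: CCBR's CIT field (bits 16:6)
 * selects the event to count, DUC starts/stops the counter, and
 * CMDS/PPCE are set once when a counter is programmed. These readings
 * are inferred from usage rather than from a public datasheet.
 */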
#ifdef CONFIG_CPU_SHX3
/*
 * The PMCAT location for SH-X3 CPUs was quietly moved, while the CCBR
 * and PMCTR locations remain tentatively constant. This change remains
 * wholly undocumented, and was simply found through trial and error.
 *
 * Early cuts of SH-X3 still appear to use the SH-X/SH-X2 locations, and
 * it's unclear when this ceased to be the case. For now we always use
 * the new location (if future parts keep up with this trend then
 * scanning for them at runtime also remains a viable option.)
 *
 * The gap in the register space also suggests that there are other
 * undocumented counters, so this will need to be revisited at a later
 * point in time.
 */
#define PPC_PMCAT	0xfc100240
#else
#define PPC_PMCAT	0xfc100080
#endif

#define PMCAT_OVF3	(1 << 27)
#define PMCAT_CNN3	(1 << 26)
#define PMCAT_CLR3	(1 << 25)
#define PMCAT_OVF2	(1 << 19)
#define PMCAT_CLR2	(1 << 17)
#define PMCAT_OVF1	(1 << 11)
#define PMCAT_CNN1	(1 << 10)
#define PMCAT_CLR1	(1 << 9)
#define PMCAT_OVF0	(1 << 3)
#define PMCAT_CLR0	(1 << 1)
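
/*
 * Of these, only the CLRn bits are used below (to zero counter n before
 * it is re-enabled); the OVFn/CNNn bits look like per-counter overflow
 * and connect status, but this driver never touches them.
 */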

static struct sh_pmu sh4a_pmu;

/*
 * Supported raw event codes:
 *
 *	Event Code	Description
 *	----------	-----------
 *
 *	0x0000		number of elapsed cycles
 *	0x0200		number of elapsed cycles in privileged mode
 *	0x0280		number of elapsed cycles while SR.BL is asserted
 *	0x0202		instruction execution
 *	0x0203		instruction execution in parallel
 *	0x0204		number of unconditional branches
 *	0x0208		number of exceptions
 *	0x0209		number of interrupts
 *	0x0220		UTLB miss caused by instruction fetch
 *	0x0222		UTLB miss caused by operand access
 *	0x02a0		number of ITLB misses
 *	0x0028		number of accesses to instruction memories
 *	0x0029		number of accesses to instruction cache
 *	0x002a		instruction cache miss
 *	0x022e		number of accesses to instruction X/Y memory
 *	0x0030		number of reads to operand memories
 *	0x0038		number of writes to operand memories
 *	0x0031		number of operand cache read accesses
 *	0x0039		number of operand cache write accesses
 *	0x0032		operand cache read miss
 *	0x003a		operand cache write miss
 *	0x0236		number of reads to operand X/Y memory
 *	0x023e		number of writes to operand X/Y memory
 *	0x0237		number of reads to operand U memory
 *	0x023f		number of writes to operand U memory
 *	0x0337		number of U memory read buffer misses
 *	0x02b4		number of wait cycles due to operand read access
 *	0x02bc		number of wait cycles due to operand write access
 *	0x0033		number of wait cycles due to operand cache read miss
 *	0x003b		number of wait cycles due to operand cache write miss
 */
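
/*
 * These codes map straight onto perf's raw event syntax, e.g.
 *
 *	perf stat -e r0032 -- ./workload
 *
 * to count operand cache read misses (codes are masked against
 * raw_event_mask below).
 */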

/*
 * Special reserved bits used by hardware emulators, read values will
 * vary, but writes must always be 0.
 */
#define PMCAT_EMU_CLR_MASK	((1 << 24) | (1 << 16) | (1 << 8) | (1 << 0))

static const int sh4a_general_events[] = {
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x0000,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x0202,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x0029,	/* I-cache */
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x002a,	/* I-cache */
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x0204,
	[PERF_COUNT_HW_BRANCH_MISSES]		= -1,
	[PERF_COUNT_HW_BUS_CYCLES]		= -1,
};

#define C(x)	PERF_COUNT_HW_CACHE_##x

static const int sh4a_cache_events
			[PERF_COUNT_HW_CACHE_MAX]
			[PERF_COUNT_HW_CACHE_OP_MAX]
			[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
	[ C(L1D) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0x0031,
			[ C(RESULT_MISS)   ] = 0x0032,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = 0x0039,
			[ C(RESULT_MISS)   ] = 0x003a,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = 0,
		},
	},

	[ C(L1I) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0x0029,
			[ C(RESULT_MISS)   ] = 0x002a,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = 0,
		},
	},

	[ C(LL) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0x0030,
			[ C(RESULT_MISS)   ] = 0,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = 0x0038,
			[ C(RESULT_MISS)   ] = 0,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = 0,
		},
	},

	[ C(DTLB) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0x0222,
			[ C(RESULT_MISS)   ] = 0x0220,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = 0,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = 0,
		},
	},

	[ C(ITLB) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = 0x02a0,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
	},

	[ C(BPU) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
	},
};
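
/*
 * Translate a generic hardware event (PERF_COUNT_HW_*) into the raw
 * SH-4A code from the table above; -1 marks events with no counterpart.
 */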
static int sh4a_event_map(int event)
{
	return sh4a_general_events[event];
}
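
/*
 * The PMCTRs read as plain 32-bit up-counters here; the value is
 * widened to the u64 the perf core expects.
 */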
static u64 sh4a_pmu_read(int idx)
{
	return __raw_readl(PPC_PMCTR(idx));
}
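
/*
 * Stop counter idx: clearing DUC halts counting, and clearing the CIT
 * field deselects the event so nothing is counted until re-enabled.
 */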
static void sh4a_pmu_disable(struct hw_perf_event *hwc, int idx)
{
	unsigned int tmp;

	tmp = __raw_readl(PPC_CCBR(idx));
	tmp &= ~(CCBR_CIT_MASK | CCBR_DUC);
	__raw_writel(tmp, PPC_CCBR(idx));
}
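
/*
 * Start counter idx: first zero it via its PMCAT CLRn bit (taking care
 * to write the emulator-reserved bits as 0), then program the event
 * code into the CIT field, and only then set DUC so counting starts
 * with the counter fully configured.
 */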
static void sh4a_pmu_enable(struct hw_perf_event *hwc, int idx)
{
	unsigned int tmp;

	tmp = __raw_readl(PPC_PMCAT);
	tmp &= ~PMCAT_EMU_CLR_MASK;
	tmp |= idx ? PMCAT_CLR1 : PMCAT_CLR0;
	__raw_writel(tmp, PPC_PMCAT);

	tmp = __raw_readl(PPC_CCBR(idx));
	tmp |= (hwc->config << 6) | CCBR_CMDS | CCBR_PPCE;
	__raw_writel(tmp, PPC_CCBR(idx));

	__raw_writel(__raw_readl(PPC_CCBR(idx)) | CCBR_DUC, PPC_CCBR(idx));
}
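
/*
 * Global start/stop only toggles DUC on each counter; the event
 * selection in CCBR is left intact, so counting resumes where it
 * left off on re-enable.
 */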
static void sh4a_pmu_disable_all(void)
{
	int i;

	for (i = 0; i < sh4a_pmu.num_events; i++)
		__raw_writel(__raw_readl(PPC_CCBR(i)) & ~CCBR_DUC, PPC_CCBR(i));
}

static void sh4a_pmu_enable_all(void)
{
	int i;

	for (i = 0; i < sh4a_pmu.num_events; i++)
		__raw_writel(__raw_readl(PPC_CCBR(i)) | CCBR_DUC, PPC_CCBR(i));
}

static struct sh_pmu sh4a_pmu = {
	.name		= "sh4a",
	.num_events	= 2,
	.event_map	= sh4a_event_map,
	.max_events	= ARRAY_SIZE(sh4a_general_events),
	.raw_event_mask	= 0x3ff,
	.cache_events	= &sh4a_cache_events,
	.read		= sh4a_pmu_read,
	.disable	= sh4a_pmu_disable,
	.enable		= sh4a_pmu_enable,
	.disable_all	= sh4a_pmu_disable_all,
	.enable_all	= sh4a_pmu_enable_all,
};
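
/*
 * Probed via early_initcall; if the CPU probe didn't flag hardware
 * counters we fall back to software events only.
 */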
static int __init sh4a_pmu_init(void)
{
	/*
	 * Make sure this CPU actually has perf counters.
	 */
	if (!(boot_cpu_data.flags & CPU_HAS_PERF_COUNTER)) {
		pr_notice("HW perf events unsupported, software events only.\n");
		return -ENODEV;
	}

	return register_sh_pmu(&sh4a_pmu);
}
early_initcall(sh4a_pmu_init);