perf_event.c

/*
 * Performance events support for SH-4A performance counters
 *
 * Copyright (C) 2009 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/perf_event.h>
#include <asm/processor.h>
#define PPC_CCBR(idx)	(0xff200800 + (sizeof(u32) * idx))
#define PPC_PMCTR(idx)	(0xfc100000 + (sizeof(u32) * idx))

#define CCBR_CIT_MASK	(0x7ff << 6)
#define CCBR_DUC	(1 << 3)
#define CCBR_CMDS	(1 << 1)
#define CCBR_PPCE	(1 << 0)

#define PPC_PMCAT	0xfc100080

#define PMCAT_OVF3	(1 << 27)
#define PMCAT_CNN3	(1 << 26)
#define PMCAT_CLR3	(1 << 25)
#define PMCAT_OVF2	(1 << 19)
#define PMCAT_CLR2	(1 << 17)
#define PMCAT_OVF1	(1 << 11)
#define PMCAT_CNN1	(1 << 10)
#define PMCAT_CLR1	(1 << 9)
#define PMCAT_OVF0	(1 << 3)
#define PMCAT_CLR0	(1 << 1)
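
/*
 * For illustration only (derived purely by expanding the macros above,
 * not taken from a datasheet): with the two counter channels this file
 * drives, the per-channel registers work out to
 *
 *	PPC_CCBR(0) = 0xff200800	PPC_PMCTR(0) = 0xfc100000
 *	PPC_CCBR(1) = 0xff200804	PPC_PMCTR(1) = 0xfc100004
 */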
static struct sh_pmu sh4a_pmu;

/*
 * Supported raw event codes:
 *
 *	Event Code	Description
 *	----------	-----------
 *
 *	0x0000		number of elapsed cycles
 *	0x0200		number of elapsed cycles in privileged mode
 *	0x0280		number of elapsed cycles while SR.BL is asserted
 *	0x0202		instruction execution
 *	0x0203		instruction execution in parallel
 *	0x0204		number of unconditional branches
 *	0x0208		number of exceptions
 *	0x0209		number of interrupts
 *	0x0220		UTLB miss caused by instruction fetch
 *	0x0222		UTLB miss caused by operand access
 *	0x02a0		number of ITLB misses
 *	0x0028		number of accesses to instruction memories
 *	0x0029		number of accesses to instruction cache
 *	0x002a		instruction cache miss
 *	0x022e		number of accesses to instruction X/Y memory
 *	0x0030		number of reads to operand memories
 *	0x0038		number of writes to operand memories
 *	0x0031		number of operand cache read accesses
 *	0x0039		number of operand cache write accesses
 *	0x0032		operand cache read miss
 *	0x003a		operand cache write miss
 *	0x0236		number of reads to operand X/Y memory
 *	0x023e		number of writes to operand X/Y memory
 *	0x0237		number of reads to operand U memory
 *	0x023f		number of writes to operand U memory
 *	0x0337		number of U memory read buffer misses
 *	0x02b4		number of wait cycles due to operand read access
 *	0x02bc		number of wait cycles due to operand write access
 *	0x0033		number of wait cycles due to operand cache read miss
 *	0x003b		number of wait cycles due to operand cache write miss
 */
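
/*
 * Usage sketch (an assumption about tooling, not part of this file):
 * with the userspace perf tool, any of the raw codes above can be
 * requested directly, e.g. instruction execution (0x0202):
 *
 *	perf stat -e r202 -- ./workload
 */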

/*
 * Special reserved bits used by hardware emulators, read values will
 * vary, but writes must always be 0.
 */
#define PMCAT_EMU_CLR_MASK	((1 << 24) | (1 << 16) | (1 << 8) | (1 << 0))

static const int sh4a_general_events[] = {
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x0000,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x0202,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x0029,	/* I-cache */
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x002a,	/* I-cache */
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x0204,
	[PERF_COUNT_HW_BRANCH_MISSES]		= -1,
	[PERF_COUNT_HW_BUS_CYCLES]		= -1,
};
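
/*
 * The -1 entries flag generalized events this PMU has no counter for;
 * presumably the common SH perf code rejects them at event init time
 * (an assumption about the core, not something visible in this file).
 */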

#define C(x)	PERF_COUNT_HW_CACHE_##x

static const int sh4a_cache_events
			[PERF_COUNT_HW_CACHE_MAX]
			[PERF_COUNT_HW_CACHE_OP_MAX]
			[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
	[ C(L1D) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0x0031,
			[ C(RESULT_MISS)   ] = 0x0032,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = 0x0039,
			[ C(RESULT_MISS)   ] = 0x003a,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = 0,
		},
	},

	[ C(L1I) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0x0029,
			[ C(RESULT_MISS)   ] = 0x002a,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = 0,
		},
	},

	[ C(LL) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0x0030,
			[ C(RESULT_MISS)   ] = 0,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = 0x0038,
			[ C(RESULT_MISS)   ] = 0,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = 0,
		},
	},

	[ C(DTLB) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0x0222,
			[ C(RESULT_MISS)   ] = 0x0220,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = 0,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = 0,
		},
	},

	[ C(ITLB) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = 0x02a0,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
	},

	[ C(BPU) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
	},
};
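
/*
 * Worked example (using the standard perf cache-event encoding of
 * id | (op << 8) | (result << 16), an assumption here rather than
 * anything this file defines): a generic L1D read miss, i.e.
 * config = C(L1D) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16),
 * resolves through the table above to raw event code 0x0032.
 */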

static int sh4a_event_map(int event)
{
	return sh4a_general_events[event];
}

static u64 sh4a_pmu_read(int idx)
{
	return __raw_readl(PPC_PMCTR(idx));
}

static void sh4a_pmu_disable(struct hw_perf_event *hwc, int idx)
{
	unsigned int tmp;

	/*
	 * Clear the event selection (CIT field) and DUC, which gates
	 * counting here: it is set in the enable paths, cleared on disable.
	 */
	tmp = __raw_readl(PPC_CCBR(idx));
	tmp &= ~(CCBR_CIT_MASK | CCBR_DUC);
	__raw_writel(tmp, PPC_CCBR(idx));
}

static void sh4a_pmu_enable(struct hw_perf_event *hwc, int idx)
{
	unsigned int tmp;

	/* Reset this channel's counter via the matching PMCAT clear bit. */
	tmp = __raw_readl(PPC_PMCAT);
	tmp &= ~PMCAT_EMU_CLR_MASK;
	tmp |= idx ? PMCAT_CLR1 : PMCAT_CLR0;
	__raw_writel(tmp, PPC_PMCAT);

	/* Program the event code into the CIT field, plus the enable bits. */
	tmp = __raw_readl(PPC_CCBR(idx));
	tmp |= (hwc->config << 6) | CCBR_CMDS | CCBR_PPCE;
	__raw_writel(tmp, PPC_CCBR(idx));

	/* Set DUC in a separate write to start counting. */
	__raw_writel(__raw_readl(PPC_CCBR(idx)) | CCBR_DUC, PPC_CCBR(idx));
}

static void sh4a_pmu_disable_all(void)
{
	int i;

	for (i = 0; i < sh4a_pmu.num_events; i++)
		__raw_writel(__raw_readl(PPC_CCBR(i)) & ~CCBR_DUC, PPC_CCBR(i));
}

static void sh4a_pmu_enable_all(void)
{
	int i;

	for (i = 0; i < sh4a_pmu.num_events; i++)
		__raw_writel(__raw_readl(PPC_CCBR(i)) | CCBR_DUC, PPC_CCBR(i));
}

static struct sh_pmu sh4a_pmu = {
	.name		= "SH-4A",
	.num_events	= 2,
	.event_map	= sh4a_event_map,
	.max_events	= ARRAY_SIZE(sh4a_general_events),
	.raw_event_mask	= 0x3ff,
	.cache_events	= &sh4a_cache_events,
	.read		= sh4a_pmu_read,
	.disable	= sh4a_pmu_disable,
	.enable		= sh4a_pmu_enable,
	.disable_all	= sh4a_pmu_disable_all,
	.enable_all	= sh4a_pmu_enable_all,
};
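
/*
 * Note (an inference from the fields above, not stated elsewhere in this
 * file): raw_event_mask of 0x3ff truncates user-supplied raw configs to
 * 10 bits, which covers every code in the table at the top of this file
 * and keeps (config << 6) inside CCBR_CIT_MASK.
 */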

static int __init sh4a_pmu_init(void)
{
	/*
	 * Make sure this CPU actually has perf counters.
	 */
	if (!(boot_cpu_data.flags & CPU_HAS_PERF_COUNTER)) {
		pr_notice("HW perf events unsupported, software events only.\n");
		return -ENODEV;
	}

	return register_sh_pmu(&sh4a_pmu);
}
arch_initcall(sh4a_pmu_init);