op_model_fsl_emb.c 7.0 KB

/*
 * Freescale Embedded oprofile support, based on ppc64 oprofile support
 * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * Copyright (c) 2004, 2010 Freescale Semiconductor, Inc
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala <galak@kernel.crashing.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/oprofile.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/reg_fsl_emb.h>
#include <asm/page.h>
#include <asm/pmc.h>
#include <asm/oprofile_impl.h>

static unsigned long reset_value[OP_MAX_COUNTER];

static int num_counters;
static int oprofile_running;

static inline u32 get_pmlca(int ctr)
{
        u32 pmlca;

        switch (ctr) {
        case 0:
                pmlca = mfpmr(PMRN_PMLCA0);
                break;
        case 1:
                pmlca = mfpmr(PMRN_PMLCA1);
                break;
        case 2:
                pmlca = mfpmr(PMRN_PMLCA2);
                break;
        case 3:
                pmlca = mfpmr(PMRN_PMLCA3);
                break;
        case 4:
                pmlca = mfpmr(PMRN_PMLCA4);
                break;
        case 5:
                pmlca = mfpmr(PMRN_PMLCA5);
                break;
        default:
                panic("Bad ctr number\n");
        }

        return pmlca;
}

static inline void set_pmlca(int ctr, u32 pmlca)
{
        switch (ctr) {
        case 0:
                mtpmr(PMRN_PMLCA0, pmlca);
                break;
        case 1:
                mtpmr(PMRN_PMLCA1, pmlca);
                break;
        case 2:
                mtpmr(PMRN_PMLCA2, pmlca);
                break;
        case 3:
                mtpmr(PMRN_PMLCA3, pmlca);
                break;
        case 4:
                mtpmr(PMRN_PMLCA4, pmlca);
                break;
        case 5:
                mtpmr(PMRN_PMLCA5, pmlca);
                break;
        default:
                panic("Bad ctr number\n");
        }
}

static inline unsigned int ctr_read(unsigned int i)
{
        switch(i) {
        case 0:
                return mfpmr(PMRN_PMC0);
        case 1:
                return mfpmr(PMRN_PMC1);
        case 2:
                return mfpmr(PMRN_PMC2);
        case 3:
                return mfpmr(PMRN_PMC3);
        case 4:
                return mfpmr(PMRN_PMC4);
        case 5:
                return mfpmr(PMRN_PMC5);
        default:
                return 0;
        }
}

static inline void ctr_write(unsigned int i, unsigned int val)
{
        switch(i) {
        case 0:
                mtpmr(PMRN_PMC0, val);
                break;
        case 1:
                mtpmr(PMRN_PMC1, val);
                break;
        case 2:
                mtpmr(PMRN_PMC2, val);
                break;
        case 3:
                mtpmr(PMRN_PMC3, val);
                break;
        case 4:
                mtpmr(PMRN_PMC4, val);
                break;
        case 5:
                mtpmr(PMRN_PMC5, val);
                break;
        default:
                break;
        }
}
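
/*
 * Park a counter in a fully frozen state before it is (re)programmed.
 * Per the e500 PMLCAn bit definitions: FC freezes the counter outright,
 * FCS/FCU freeze it in supervisor/user state, and FCM1/FCM0 freeze it
 * for either value of the MSR[PMM] mark bit, so with all five bits set
 * the counter cannot advance in any state.
 */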
static void init_pmc_stop(int ctr)
{
        u32 pmlca = (PMLCA_FC | PMLCA_FCS | PMLCA_FCU |
                        PMLCA_FCM1 | PMLCA_FCM0);
        u32 pmlcb = 0;

        switch (ctr) {
        case 0:
                mtpmr(PMRN_PMLCA0, pmlca);
                mtpmr(PMRN_PMLCB0, pmlcb);
                break;
        case 1:
                mtpmr(PMRN_PMLCA1, pmlca);
                mtpmr(PMRN_PMLCB1, pmlcb);
                break;
        case 2:
                mtpmr(PMRN_PMLCA2, pmlca);
                mtpmr(PMRN_PMLCB2, pmlcb);
                break;
        case 3:
                mtpmr(PMRN_PMLCA3, pmlca);
                mtpmr(PMRN_PMLCB3, pmlcb);
                break;
        case 4:
                mtpmr(PMRN_PMLCA4, pmlca);
                mtpmr(PMRN_PMLCB4, pmlcb);
                break;
        case 5:
                mtpmr(PMRN_PMLCA5, pmlca);
                mtpmr(PMRN_PMLCB5, pmlcb);
                break;
        default:
                panic("Bad ctr number!\n");
        }
}

static void set_pmc_event(int ctr, int event)
{
        u32 pmlca;

        pmlca = get_pmlca(ctr);

        pmlca = (pmlca & ~PMLCA_EVENT_MASK) |
                ((event << PMLCA_EVENT_SHIFT) &
                 PMLCA_EVENT_MASK);

        set_pmlca(ctr, pmlca);
}

static void set_pmc_user_kernel(int ctr, int user, int kernel)
{
        u32 pmlca;

        pmlca = get_pmlca(ctr);

        if(user)
                pmlca &= ~PMLCA_FCU;
        else
                pmlca |= PMLCA_FCU;

        if(kernel)
                pmlca &= ~PMLCA_FCS;
        else
                pmlca |= PMLCA_FCS;

        set_pmlca(ctr, pmlca);
}

static void set_pmc_marked(int ctr, int mark0, int mark1)
{
        u32 pmlca = get_pmlca(ctr);

        if(mark0)
                pmlca &= ~PMLCA_FCM0;
        else
                pmlca |= PMLCA_FCM0;

        if(mark1)
                pmlca &= ~PMLCA_FCM1;
        else
                pmlca |= PMLCA_FCM1;

        set_pmlca(ctr, pmlca);
}

static void pmc_start_ctr(int ctr, int enable)
{
        u32 pmlca = get_pmlca(ctr);

        pmlca &= ~PMLCA_FC;

        if (enable)
                pmlca |= PMLCA_CE;
        else
                pmlca &= ~PMLCA_CE;

        set_pmlca(ctr, pmlca);
}
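
/*
 * Unfreeze all counters via the global control register: clearing
 * PMGC0[FAC] removes the global freeze, PMGC0[FCECE] re-freezes the
 * counters when an enabled overflow condition occurs, and PMGC0[PMIE]
 * controls whether that condition also raises a performance monitor
 * interrupt.
 */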
static void pmc_start_ctrs(int enable)
{
        u32 pmgc0 = mfpmr(PMRN_PMGC0);

        pmgc0 &= ~PMGC0_FAC;
        pmgc0 |= PMGC0_FCECE;

        if (enable)
                pmgc0 |= PMGC0_PMIE;
        else
                pmgc0 &= ~PMGC0_PMIE;

        mtpmr(PMRN_PMGC0, pmgc0);
}

static void pmc_stop_ctrs(void)
{
        u32 pmgc0 = mfpmr(PMRN_PMGC0);

        pmgc0 |= PMGC0_FAC;

        pmgc0 &= ~(PMGC0_PMIE | PMGC0_FCECE);

        mtpmr(PMRN_PMGC0, pmgc0);
}

static int fsl_emb_cpu_setup(struct op_counter_config *ctr)
{
        int i;

        /* freeze all counters */
        pmc_stop_ctrs();

        for (i = 0;i < num_counters;i++) {
                init_pmc_stop(i);

                set_pmc_event(i, ctr[i].event);

                set_pmc_user_kernel(i, ctr[i].user, ctr[i].kernel);
        }

        return 0;
}

static int fsl_emb_reg_setup(struct op_counter_config *ctr,
                             struct op_system_config *sys,
                             int num_ctrs)
{
        int i;

        num_counters = num_ctrs;

        /* Our counters count up, and "count" refers to
         * how much before the next interrupt, and we interrupt
         * on overflow.  So we calculate the starting value
         * which will give us "count" until overflow.
         * Then we set the events on the enabled counters */
        for (i = 0; i < num_counters; ++i)
                reset_value[i] = 0x80000000UL - ctr[i].count;

        return 0;
}
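
/*
 * Worked example of the reset_value arithmetic (illustrative numbers):
 * with ctr[i].count == 100000 (0x186a0), reset_value[i] becomes
 * 0x80000000 - 0x186a0 = 0x7ffe7960.  After exactly 100000 events the
 * counter reaches 0x80000000, its MSB becomes set, and the overflow
 * condition fires, so each sample covers "count" events.
 */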
static int fsl_emb_start(struct op_counter_config *ctr)
{
        int i;

        mtmsr(mfmsr() | MSR_PMM);

        for (i = 0; i < num_counters; ++i) {
                if (ctr[i].enabled) {
                        ctr_write(i, reset_value[i]);
                        /* Set each enabled counter to only
                         * count when the Mark bit is *not* set */
                        set_pmc_marked(i, 1, 0);
                        pmc_start_ctr(i, 1);
                } else {
                        ctr_write(i, 0);

                        /* Set the ctr to be stopped */
                        pmc_start_ctr(i, 0);
                }
        }

        /* Clear the freeze bit, and enable the interrupt.
         * The counters won't actually start until the rfi clears
         * the PMM bit */
        pmc_start_ctrs(1);

        oprofile_running = 1;

        pr_debug("start on cpu %d, pmgc0 %x\n", smp_processor_id(),
                        mfpmr(PMRN_PMGC0));

        return 0;
}

static void fsl_emb_stop(void)
{
        /* freeze counters */
        pmc_stop_ctrs();

        oprofile_running = 0;

        pr_debug("stop on cpu %d, pmgc0 %x\n", smp_processor_id(),
                        mfpmr(PMRN_PMGC0));

        mb();
}
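
/*
 * Performance monitor interrupt handler.  ctr_read()'s unsigned result
 * is assigned to a signed int below, so any counter whose MSB is set
 * (i.e. one that has reached its overflow threshold) reads back as
 * negative; those counters get a sample recorded and are reloaded from
 * reset_value[].
 */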
static void fsl_emb_handle_interrupt(struct pt_regs *regs,
                                     struct op_counter_config *ctr)
{
        unsigned long pc;
        int is_kernel;
        int val;
        int i;

        pc = regs->nip;
        is_kernel = is_kernel_addr(pc);

        for (i = 0; i < num_counters; ++i) {
                val = ctr_read(i);
                if (val < 0) {
                        if (oprofile_running && ctr[i].enabled) {
                                oprofile_add_ext_sample(pc, regs, i, is_kernel);
                                ctr_write(i, reset_value[i]);
                        } else {
                                ctr_write(i, 0);
                        }
                }
        }

        /* The freeze bit was set by the interrupt. */
        /* Clear the freeze bit, and reenable the interrupt.  The
         * counters won't actually start until the rfi clears the PMM
         * bit.  The PMM bit should not be set until after the interrupt
         * is cleared to avoid it getting lost in some hypervisor
         * environments.
         */
        mtmsr(mfmsr() | MSR_PMM);
        pmc_start_ctrs(1);
}

struct op_powerpc_model op_model_fsl_emb = {
        .reg_setup              = fsl_emb_reg_setup,
        .cpu_setup              = fsl_emb_cpu_setup,
        .start                  = fsl_emb_start,
        .stop                   = fsl_emb_stop,
        .handle_interrupt       = fsl_emb_handle_interrupt,
};
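
/*
 * For reference, a minimal sketch of how this model is expected to be
 * selected, assuming the usual dispatch in arch/powerpc/oprofile/common.c
 * on cur_cpu_spec->oprofile_type (names outside this file are not defined
 * here and are shown only as an assumption):
 *
 *        switch (cur_cpu_spec->oprofile_type) {
 *        case PPC_OPROFILE_FSL_EMB:
 *                model = &op_model_fsl_emb;
 *                break;
 *        }
 */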