bcm1250_tbprof.c 11 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390
  1. /*
  2. * Copyright (C) 2001, 2002, 2003 Broadcom Corporation
  3. *
  4. * This program is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU General Public License
  6. * as published by the Free Software Foundation; either version 2
  7. * of the License, or (at your option) any later version.
  8. *
  9. * This program is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU General Public License
  15. * along with this program; if not, write to the Free Software
  16. * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  17. */
  18. #define SBPROF_TB_DEBUG 0
  19. #include <linux/module.h>
  20. #include <linux/kernel.h>
  21. #include <linux/types.h>
  22. #include <linux/init.h>
  23. #include <linux/interrupt.h>
  24. #include <linux/slab.h>
  25. #include <linux/vmalloc.h>
  26. #include <linux/fs.h>
  27. #include <linux/errno.h>
  28. #include <linux/reboot.h>
  29. #include <asm/uaccess.h>
  30. #include <asm/io.h>
  31. #include <asm/sibyte/sb1250.h>
  32. #include <asm/sibyte/sb1250_regs.h>
  33. #include <asm/sibyte/sb1250_scd.h>
  34. #include <asm/sibyte/sb1250_int.h>
  35. #include <asm/sibyte/trace_prof.h>
#define DEVNAME "bcm1250_tbprof"

/* Single global device state: this driver supports exactly one opener /
   one profiling session at a time (see sbprof_tb_open). */
static struct sbprof_tb sbp;

/* True once every trace-buffer sample slot has been consumed. */
#define TB_FULL (sbp.next_tb_sample == MAX_TB_SAMPLES)

/************************************************************************
 * Support for ZBbus sampling using the trace buffer
 *
 * We use the SCD performance counter interrupt, caused by a Zclk counter
 * overflow, to trigger the start of tracing.
 *
 * We set the trace buffer to sample everything and freeze on
 * overflow.
 *
 * We map the interrupt for trace_buffer_freeze to handle it on CPU 0.
 *
 ************************************************************************/

/* Sampling period in ZBbus clocks; computed at init from zbbus_mhz. */
static u_int64_t tb_period;
/*
 * Arm one trace capture: program SCD performance counter 1 so it
 * overflows (raising the perf-counter interrupt, which starts tracing)
 * after tb_period ZBbus clocks, and reset/configure the trace buffer to
 * freeze when full.  The freeze raises K_INT_TRACE_FREEZE, which is
 * handled by sbprof_tb_intr.  Sets sbp.tb_armed.
 */
static void arm_tb(void)
{
	u_int64_t scdperfcnt;
	/* Counter 1 is 40 bits wide, so starting it at (2^40 - tb_period)
	   makes it overflow after exactly tb_period cycles. */
	u_int64_t next = (1ULL << 40) - tb_period;
	u_int64_t tb_options = M_SCD_TRACE_CFG_FREEZE_FULL;

	/* Generate an SCD_PERFCNT interrupt in TB_PERIOD Zclks to
	   trigger start of trace.  XXX vary sampling period */
	bus_writeq(0, IOADDR(A_SCD_PERF_CNT_1));
	scdperfcnt = bus_readq(IOADDR(A_SCD_PERF_CNT_CFG));
	/* Unfortunately, in Pass 2 we must clear all counters to knock down
	   a previous interrupt request.  This means that bus profiling
	   requires ALL of the SCD perf counters. */
	bus_writeq((scdperfcnt & ~M_SPC_CFG_SRC1) |	// keep counters 0,2,3 as is
		   M_SPC_CFG_ENABLE |			// enable counting
		   M_SPC_CFG_CLEAR |			// clear all counters
		   V_SPC_CFG_SRC1(1),			// counter 1 counts cycles
		   IOADDR(A_SCD_PERF_CNT_CFG));
	bus_writeq(next, IOADDR(A_SCD_PERF_CNT_1));
	/* Reset the trace buffer */
	bus_writeq(M_SCD_TRACE_CFG_RESET, IOADDR(A_SCD_TRACE_CFG));
#if 0 && defined(M_SCD_TRACE_CFG_FORCECNT)
	/* XXXKW may want to expose control to the data-collector */
	tb_options |= M_SCD_TRACE_CFG_FORCECNT;
#endif
	bus_writeq(tb_options, IOADDR(A_SCD_TRACE_CFG));
	sbp.tb_armed = 1;
}
/*
 * Trace-buffer-freeze interrupt handler (K_INT_TRACE_FREEZE): the trace
 * buffer has frozen after filling.  Drain the 256 six-word bundles into
 * the next sample slot, then either re-arm (still enabled) or reset and
 * wake up waiters (disabled, or sample buffer exhausted).
 */
static irqreturn_t sbprof_tb_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	int i;

	DBG(printk(DEVNAME ": tb_intr\n"));
	if (sbp.next_tb_sample < MAX_TB_SAMPLES) {
		/* XXX should use XKPHYS to make writes bypass L2 */
		u_int64_t *p = sbp.sbprof_tbbuf[sbp.next_tb_sample++];
		/* Read out trace */
		bus_writeq(M_SCD_TRACE_CFG_START_READ, IOADDR(A_SCD_TRACE_CFG));
		/* Make sure START_READ lands before the first read below. */
		__asm__ __volatile__ ("sync" : : : "memory");
		/* Loop runs backwards because bundles are read out in reverse order */
		for (i = 256 * 6; i > 0; i -= 6) {
			// Subscripts decrease to put bundle in the order
			//   t0 lo, t0 hi, t1 lo, t1 hi, t2 lo, t2 hi
			p[i-1] = bus_readq(IOADDR(A_SCD_TRACE_READ));	// read t2 hi
			p[i-2] = bus_readq(IOADDR(A_SCD_TRACE_READ));	// read t2 lo
			p[i-3] = bus_readq(IOADDR(A_SCD_TRACE_READ));	// read t1 hi
			p[i-4] = bus_readq(IOADDR(A_SCD_TRACE_READ));	// read t1 lo
			p[i-5] = bus_readq(IOADDR(A_SCD_TRACE_READ));	// read t0 hi
			p[i-6] = bus_readq(IOADDR(A_SCD_TRACE_READ));	// read t0 lo
		}
		if (!sbp.tb_enable) {
			/* Profiling was stopped while armed: reset the trace
			   buffer and wake the thread blocked in zbprof_stop. */
			DBG(printk(DEVNAME ": tb_intr shutdown\n"));
			bus_writeq(M_SCD_TRACE_CFG_RESET,
				   IOADDR(A_SCD_TRACE_CFG));
			sbp.tb_armed = 0;
			wake_up(&sbp.tb_sync);
		} else {
			arm_tb();	// knock down current interrupt and get another one later
		}
	} else {
		/* No more trace buffer samples */
		DBG(printk(DEVNAME ": tb_intr full\n"));
		bus_writeq(M_SCD_TRACE_CFG_RESET, IOADDR(A_SCD_TRACE_CFG));
		sbp.tb_armed = 0;
		if (!sbp.tb_enable) {
			wake_up(&sbp.tb_sync);
		}
		/* Buffer full: wake anyone blocked in SBPROF_ZBWAITFULL. */
		wake_up(&sbp.tb_read);
	}
	return IRQ_HANDLED;
}
  121. static irqreturn_t sbprof_pc_intr(int irq, void *dev_id, struct pt_regs *regs)
  122. {
  123. printk(DEVNAME ": unexpected pc_intr");
  124. return IRQ_NONE;
  125. }
/*
 * Start ZBbus profiling: grab the trace-freeze and perf-counter IRQs,
 * set up SCD perf counter 1 as the trigger source, clear the address
 * traps, program the trace events/sequences so that everything is
 * sampled once the perf-counter interrupt fires, then arm the first
 * capture.
 *
 * Returns 0 on success, -EBUSY if profiling is already enabled or an
 * IRQ could not be obtained.
 */
int sbprof_zbprof_start(struct file *filp)
{
	u_int64_t scdperfcnt;

	if (sbp.tb_enable)
		return -EBUSY;

	DBG(printk(DEVNAME ": starting\n"));

	sbp.tb_enable = 1;
	sbp.next_tb_sample = 0;
	filp->f_pos = 0;

	if (request_irq
	    (K_INT_TRACE_FREEZE, sbprof_tb_intr, 0, DEVNAME " trace freeze", &sbp)) {
		return -EBUSY;
	}

	/* Make sure there isn't a perf-cnt interrupt waiting */
	scdperfcnt = bus_readq(IOADDR(A_SCD_PERF_CNT_CFG));
	/* Disable and clear counters, override SRC_1 */
	bus_writeq((scdperfcnt & ~(M_SPC_CFG_SRC1 | M_SPC_CFG_ENABLE)) |
		   M_SPC_CFG_ENABLE |
		   M_SPC_CFG_CLEAR |
		   V_SPC_CFG_SRC1(1),
		   IOADDR(A_SCD_PERF_CNT_CFG));

	/* We grab this interrupt to prevent others from trying to use
	   it, even though we don't want to service the interrupts
	   (they only feed into the trace-on-interrupt mechanism) */
	if (request_irq
	    (K_INT_PERF_CNT, sbprof_pc_intr, 0, DEVNAME " scd perfcnt", &sbp)) {
		free_irq(K_INT_TRACE_FREEZE, &sbp);
		return -EBUSY;
	}

	/* I need the core to mask these, but the interrupt mapper to
	   pass them through.  I am exploiting my knowledge that
	   cp0_status masks out IP[5]. krw */
	bus_writeq(K_INT_MAP_I3,
		   IOADDR(A_IMR_REGISTER(0, R_IMR_INTERRUPT_MAP_BASE) +
			  (K_INT_PERF_CNT << 3)));

	/* Initialize address traps (all disabled: zero bounds and config) */
	bus_writeq(0, IOADDR(A_ADDR_TRAP_UP_0));
	bus_writeq(0, IOADDR(A_ADDR_TRAP_UP_1));
	bus_writeq(0, IOADDR(A_ADDR_TRAP_UP_2));
	bus_writeq(0, IOADDR(A_ADDR_TRAP_UP_3));

	bus_writeq(0, IOADDR(A_ADDR_TRAP_DOWN_0));
	bus_writeq(0, IOADDR(A_ADDR_TRAP_DOWN_1));
	bus_writeq(0, IOADDR(A_ADDR_TRAP_DOWN_2));
	bus_writeq(0, IOADDR(A_ADDR_TRAP_DOWN_3));

	bus_writeq(0, IOADDR(A_ADDR_TRAP_CFG_0));
	bus_writeq(0, IOADDR(A_ADDR_TRAP_CFG_1));
	bus_writeq(0, IOADDR(A_ADDR_TRAP_CFG_2));
	bus_writeq(0, IOADDR(A_ADDR_TRAP_CFG_3));

	/* Initialize Trace Event 0-7: only event 0 (interrupt) is used */
	// when interrupt
	bus_writeq(M_SCD_TREVT_INTERRUPT, IOADDR(A_SCD_TRACE_EVENT_0));
	bus_writeq(0, IOADDR(A_SCD_TRACE_EVENT_1));
	bus_writeq(0, IOADDR(A_SCD_TRACE_EVENT_2));
	bus_writeq(0, IOADDR(A_SCD_TRACE_EVENT_3));
	bus_writeq(0, IOADDR(A_SCD_TRACE_EVENT_4));
	bus_writeq(0, IOADDR(A_SCD_TRACE_EVENT_5));
	bus_writeq(0, IOADDR(A_SCD_TRACE_EVENT_6));
	bus_writeq(0, IOADDR(A_SCD_TRACE_EVENT_7));

	/* Initialize Trace Sequence 0-7 */
	// Start on event 0 (interrupt)
	bus_writeq(V_SCD_TRSEQ_FUNC_START | 0x0fff,
		   IOADDR(A_SCD_TRACE_SEQUENCE_0));
	// dsamp when d used | asamp when a used
	bus_writeq(M_SCD_TRSEQ_ASAMPLE | M_SCD_TRSEQ_DSAMPLE |
		   K_SCD_TRSEQ_TRIGGER_ALL,
		   IOADDR(A_SCD_TRACE_SEQUENCE_1));
	bus_writeq(0, IOADDR(A_SCD_TRACE_SEQUENCE_2));
	bus_writeq(0, IOADDR(A_SCD_TRACE_SEQUENCE_3));
	bus_writeq(0, IOADDR(A_SCD_TRACE_SEQUENCE_4));
	bus_writeq(0, IOADDR(A_SCD_TRACE_SEQUENCE_5));
	bus_writeq(0, IOADDR(A_SCD_TRACE_SEQUENCE_6));
	bus_writeq(0, IOADDR(A_SCD_TRACE_SEQUENCE_7));

	/* Now indicate the PERF_CNT interrupt as a trace-relevant interrupt */
	bus_writeq((1ULL << K_INT_PERF_CNT),
		   IOADDR(A_IMR_REGISTER(0, R_IMR_INTERRUPT_TRACE)));

	arm_tb();

	DBG(printk(DEVNAME ": done starting\n"));

	return 0;
}
/*
 * Stop ZBbus profiling.  Clears tb_enable; if a capture is currently
 * armed, sleeps until sbprof_tb_intr disarms it and wakes tb_sync, then
 * releases both IRQs.  Always returns 0.
 */
int sbprof_zbprof_stop(void)
{
	DBG(printk(DEVNAME ": stopping\n"));
	if (sbp.tb_enable) {
		sbp.tb_enable = 0;
		/* XXXKW there is a window here where the intr handler
		   may run, see the disable, and do the wake_up before
		   this sleep happens. */
		if (sbp.tb_armed) {
			DBG(printk(DEVNAME ": wait for disarm\n"));
			interruptible_sleep_on(&sbp.tb_sync);
			DBG(printk(DEVNAME ": disarm complete\n"));
		}
		free_irq(K_INT_TRACE_FREEZE, &sbp);
		free_irq(K_INT_PERF_CNT, &sbp);
	}
	DBG(printk(DEVNAME ": done stopping\n"));
	return 0;
}
  224. static int sbprof_tb_open(struct inode *inode, struct file *filp)
  225. {
  226. int minor;
  227. minor = iminor(inode);
  228. if (minor != 0) {
  229. return -ENODEV;
  230. }
  231. if (sbp.open) {
  232. return -EBUSY;
  233. }
  234. memset(&sbp, 0, sizeof(struct sbprof_tb));
  235. sbp.sbprof_tbbuf = vmalloc(MAX_TBSAMPLE_BYTES);
  236. if (!sbp.sbprof_tbbuf) {
  237. return -ENOMEM;
  238. }
  239. memset(sbp.sbprof_tbbuf, 0, MAX_TBSAMPLE_BYTES);
  240. init_waitqueue_head(&sbp.tb_sync);
  241. init_waitqueue_head(&sbp.tb_read);
  242. sbp.open = 1;
  243. return 0;
  244. }
  245. static int sbprof_tb_release(struct inode *inode, struct file *filp)
  246. {
  247. int minor;
  248. minor = iminor(inode);
  249. if (minor != 0 || !sbp.open) {
  250. return -ENODEV;
  251. }
  252. if (sbp.tb_armed || sbp.tb_enable) {
  253. sbprof_zbprof_stop();
  254. }
  255. vfree(sbp.sbprof_tbbuf);
  256. sbp.open = 0;
  257. return 0;
  258. }
  259. static ssize_t sbprof_tb_read(struct file *filp, char *buf,
  260. size_t size, loff_t *offp)
  261. {
  262. int cur_sample, sample_off, cur_count, sample_left;
  263. char *src;
  264. int count = 0;
  265. char *dest = buf;
  266. long cur_off = *offp;
  267. count = 0;
  268. cur_sample = cur_off / TB_SAMPLE_SIZE;
  269. sample_off = cur_off % TB_SAMPLE_SIZE;
  270. sample_left = TB_SAMPLE_SIZE - sample_off;
  271. while (size && (cur_sample < sbp.next_tb_sample)) {
  272. cur_count = size < sample_left ? size : sample_left;
  273. src = (char *)(((long)sbp.sbprof_tbbuf[cur_sample])+sample_off);
  274. copy_to_user(dest, src, cur_count);
  275. DBG(printk(DEVNAME ": read from sample %d, %d bytes\n",
  276. cur_sample, cur_count));
  277. size -= cur_count;
  278. sample_left -= cur_count;
  279. if (!sample_left) {
  280. cur_sample++;
  281. sample_off = 0;
  282. sample_left = TB_SAMPLE_SIZE;
  283. } else {
  284. sample_off += cur_count;
  285. }
  286. cur_off += cur_count;
  287. dest += cur_count;
  288. count += cur_count;
  289. }
  290. *offp = cur_off;
  291. return count;
  292. }
/*
 * ioctl interface:
 *   SBPROF_ZBSTART    - begin ZBbus profiling (arg unused)
 *   SBPROF_ZBSTOP     - stop profiling (arg unused)
 *   SBPROF_ZBWAITFULL - sleep until woken via tb_read, then store the
 *                       TB_FULL flag (as int) to user *arg
 * Returns 0 or a negative errno.
 */
static int sbprof_tb_ioctl(struct inode *inode,
			   struct file *filp,
			   unsigned int command,
			   unsigned long arg)
{
	int error = 0;

	switch (command) {
	case SBPROF_ZBSTART:
		error = sbprof_zbprof_start(filp);
		break;
	case SBPROF_ZBSTOP:
		error = sbprof_zbprof_stop();
		break;
	case SBPROF_ZBWAITFULL:
		/* NOTE(review): interruptible_sleep_on is inherently racy
		   (the wake_up can be missed) and a signal also ends the
		   wait, so TB_FULL may be reported before the buffer is
		   actually full.  XXXKW check if interrupted? */
		interruptible_sleep_on(&sbp.tb_read);
		return put_user(TB_FULL, (int *) arg);
	default:
		error = -EINVAL;
		break;
	}
	return error;
}
/* Character-device entry points for /dev node SBPROF_TB_MAJOR, minor 0.
   mmap is deliberately unsupported; samples are retrieved via read(). */
static struct file_operations sbprof_tb_fops = {
	.owner		= THIS_MODULE,
	.open		= sbprof_tb_open,
	.release	= sbprof_tb_release,
	.read		= sbprof_tb_read,
	.ioctl		= sbprof_tb_ioctl,
	.mmap		= NULL,
};
  324. static int __init sbprof_tb_init(void)
  325. {
  326. if (register_chrdev(SBPROF_TB_MAJOR, DEVNAME, &sbprof_tb_fops)) {
  327. printk(KERN_WARNING DEVNAME ": initialization failed (dev %d)\n",
  328. SBPROF_TB_MAJOR);
  329. return -EIO;
  330. }
  331. sbp.open = 0;
  332. tb_period = zbbus_mhz * 10000LL;
  333. printk(KERN_INFO DEVNAME ": initialized - tb_period = %lld\n", tb_period);
  334. return 0;
  335. }
/* Module exit: unregister the character device.  Any open session was
   already torn down through sbprof_tb_release. */
static void __exit sbprof_tb_cleanup(void)
{
	unregister_chrdev(SBPROF_TB_MAJOR, DEVNAME);
}

module_init(sbprof_tb_init);
module_exit(sbprof_tb_cleanup);