/* bcm1250_tbprof.c */
  1. /*
  2. * Copyright (C) 2001, 2002, 2003 Broadcom Corporation
  3. *
  4. * This program is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU General Public License
  6. * as published by the Free Software Foundation; either version 2
  7. * of the License, or (at your option) any later version.
  8. *
  9. * This program is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU General Public License
  15. * along with this program; if not, write to the Free Software
  16. * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  17. */
  18. #define SBPROF_TB_DEBUG 0
  19. #include <linux/module.h>
  20. #include <linux/kernel.h>
  21. #include <linux/types.h>
  22. #include <linux/init.h>
  23. #include <linux/interrupt.h>
  24. #include <linux/slab.h>
  25. #include <linux/vmalloc.h>
  26. #include <linux/fs.h>
  27. #include <linux/errno.h>
  28. #include <linux/reboot.h>
  29. #include <linux/smp_lock.h>
  30. #include <linux/wait.h>
  31. #include <asm/uaccess.h>
  32. #include <asm/io.h>
  33. #include <asm/sibyte/sb1250.h>
  34. #include <asm/sibyte/sb1250_regs.h>
  35. #include <asm/sibyte/sb1250_scd.h>
  36. #include <asm/sibyte/sb1250_int.h>
  37. #include <asm/sibyte/trace_prof.h>
  38. #define DEVNAME "bcm1250_tbprof"
  39. static struct sbprof_tb sbp;
  40. #define TB_FULL (sbp.next_tb_sample == MAX_TB_SAMPLES)
  41. /************************************************************************
  42. * Support for ZBbus sampling using the trace buffer
  43. *
  44. * We use the SCD performance counter interrupt, caused by a Zclk counter
  45. * overflow, to trigger the start of tracing.
  46. *
  47. * We set the trace buffer to sample everything and freeze on
  48. * overflow.
  49. *
  50. * We map the interrupt for trace_buffer_freeze to handle it on CPU 0.
  51. *
  52. ************************************************************************/
  53. static u_int64_t tb_period;
/*
 * Arm the trace buffer for one sample: program SCD performance
 * counter 1 so it overflows (raising the SCD_PERFCNT interrupt, which
 * the hardware uses as the trace-start trigger) after tb_period
 * Zclks, reset the trace buffer, and configure it to freeze when
 * full.  Sets sbp.tb_armed as the last step.
 */
static void arm_tb(void)
{
	u_int64_t scdperfcnt;
	/* Counter 1 is 40 bits wide; start it at (2^40 - tb_period) so it
	   overflows after exactly tb_period Zclks. */
	u_int64_t next = (1ULL << 40) - tb_period;
	u_int64_t tb_options = M_SCD_TRACE_CFG_FREEZE_FULL;

	/* Generate an SCD_PERFCNT interrupt in TB_PERIOD Zclks to
	   trigger start of trace.  XXX vary sampling period */
	__raw_writeq(0, IOADDR(A_SCD_PERF_CNT_1));
	scdperfcnt = __raw_readq(IOADDR(A_SCD_PERF_CNT_CFG));

	/* Unfortunately, in Pass 2 we must clear all counters to knock down
	   a previous interrupt request.  This means that bus profiling
	   requires ALL of the SCD perf counters. */
	__raw_writeq((scdperfcnt & ~M_SPC_CFG_SRC1) |	// keep counters 0,2,3 as is
		     M_SPC_CFG_ENABLE |			// enable counting
		     M_SPC_CFG_CLEAR |			// clear all counters
		     V_SPC_CFG_SRC1(1),			// counter 1 counts cycles
		     IOADDR(A_SCD_PERF_CNT_CFG));
	__raw_writeq(next, IOADDR(A_SCD_PERF_CNT_1));

	/* Reset the trace buffer */
	__raw_writeq(M_SCD_TRACE_CFG_RESET, IOADDR(A_SCD_TRACE_CFG));
#if 0 && defined(M_SCD_TRACE_CFG_FORCECNT)
	/* XXXKW may want to expose control to the data-collector */
	tb_options |= M_SCD_TRACE_CFG_FORCECNT;
#endif
	__raw_writeq(tb_options, IOADDR(A_SCD_TRACE_CFG));
	sbp.tb_armed = 1;
}
/*
 * Trace-buffer-freeze interrupt handler.  Drains the frozen trace
 * buffer (256 bundles of 6 64-bit words) into the next sample slot,
 * then either re-arms for another sample, or — when sampling has been
 * disabled or the sample array is full — parks the trace unit and
 * wakes up the waiters in sbprof_zbprof_stop() / SBPROF_ZBWAITFULL.
 */
static irqreturn_t sbprof_tb_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	int i;

	DBG(printk(DEVNAME ": tb_intr\n"));

	if (sbp.next_tb_sample < MAX_TB_SAMPLES) {
		/* XXX should use XKPHYS to make writes bypass L2 */
		u_int64_t *p = sbp.sbprof_tbbuf[sbp.next_tb_sample++];

		/* Read out trace */
		__raw_writeq(M_SCD_TRACE_CFG_START_READ,
			     IOADDR(A_SCD_TRACE_CFG));
		/* Make sure START_READ reaches the SCD before the first
		   read of A_SCD_TRACE_READ below. */
		__asm__ __volatile__ ("sync" : : : "memory");

		/* Loop runs backwards because bundles are read out in reverse order */
		for (i = 256 * 6; i > 0; i -= 6) {
			// Subscripts decrease to put bundle in the order
			//   t0 lo, t0 hi, t1 lo, t1 hi, t2 lo, t2 hi
			p[i - 1] = __raw_readq(IOADDR(A_SCD_TRACE_READ));	// read t2 hi
			p[i - 2] = __raw_readq(IOADDR(A_SCD_TRACE_READ));	// read t2 lo
			p[i - 3] = __raw_readq(IOADDR(A_SCD_TRACE_READ));	// read t1 hi
			p[i - 4] = __raw_readq(IOADDR(A_SCD_TRACE_READ));	// read t1 lo
			p[i - 5] = __raw_readq(IOADDR(A_SCD_TRACE_READ));	// read t0 hi
			p[i - 6] = __raw_readq(IOADDR(A_SCD_TRACE_READ));	// read t0 lo
		}
		if (!sbp.tb_enable) {
			/* Sampling was stopped while we were armed: reset the
			   trace unit and wake the stop path blocked on
			   tb_sync in sbprof_zbprof_stop(). */
			DBG(printk(DEVNAME ": tb_intr shutdown\n"));
			__raw_writeq(M_SCD_TRACE_CFG_RESET,
				     IOADDR(A_SCD_TRACE_CFG));
			sbp.tb_armed = 0;
			wake_up(&sbp.tb_sync);
		} else {
			arm_tb();	// knock down current interrupt and get another one later
		}
	} else {
		/* No more trace buffer samples */
		DBG(printk(DEVNAME ": tb_intr full\n"));
		__raw_writeq(M_SCD_TRACE_CFG_RESET, IOADDR(A_SCD_TRACE_CFG));
		sbp.tb_armed = 0;
		if (!sbp.tb_enable) {
			wake_up(&sbp.tb_sync);
		}
		/* Sample array is full: wake anyone blocked in the
		   SBPROF_ZBWAITFULL ioctl. */
		wake_up(&sbp.tb_read);
	}
	return IRQ_HANDLED;
}
  131. static irqreturn_t sbprof_pc_intr(int irq, void *dev_id, struct pt_regs *regs)
  132. {
  133. printk(DEVNAME ": unexpected pc_intr");
  134. return IRQ_NONE;
  135. }
/*
 * Start ZBbus profiling.
 *
 * Claims the trace-freeze and perf-counter interrupts, quiesces the
 * address traps, programs the trace events/sequences so that a
 * perf-counter interrupt starts sampling of all address and data
 * phases, routes K_INT_PERF_CNT into the trace unit, and arms the
 * trace buffer for the first sample.
 *
 * @filp: the open device file; its position is rewound so a new
 *        profiling run reads from sample 0.
 *
 * Returns 0 on success, -EBUSY if profiling is already enabled or
 * either IRQ could not be obtained.
 */
int sbprof_zbprof_start(struct file *filp)
{
	u_int64_t scdperfcnt;

	if (sbp.tb_enable)
		return -EBUSY;

	DBG(printk(DEVNAME ": starting\n"));

	sbp.tb_enable = 1;
	sbp.next_tb_sample = 0;
	filp->f_pos = 0;

	if (request_irq
	    (K_INT_TRACE_FREEZE, sbprof_tb_intr, 0, DEVNAME " trace freeze", &sbp)) {
		return -EBUSY;
	}

	/* Make sure there isn't a perf-cnt interrupt waiting */
	scdperfcnt = __raw_readq(IOADDR(A_SCD_PERF_CNT_CFG));
	/* Disable and clear counters, override SRC_1 */
	/* NOTE(review): despite the comment above, this write leaves
	   M_SPC_CFG_ENABLE set; presumably M_SPC_CFG_CLEAR alone is
	   what knocks down the pending request — confirm against the
	   SCD documentation. */
	__raw_writeq((scdperfcnt & ~(M_SPC_CFG_SRC1 | M_SPC_CFG_ENABLE)) |
		     M_SPC_CFG_ENABLE | M_SPC_CFG_CLEAR | V_SPC_CFG_SRC1(1),
		     IOADDR(A_SCD_PERF_CNT_CFG));

	/* We grab this interrupt to prevent others from trying to use
	   it, even though we don't want to service the interrupts
	   (they only feed into the trace-on-interrupt mechanism) */
	if (request_irq
	    (K_INT_PERF_CNT, sbprof_pc_intr, 0, DEVNAME " scd perfcnt", &sbp)) {
		free_irq(K_INT_TRACE_FREEZE, &sbp);
		return -EBUSY;
	}

	/* I need the core to mask these, but the interrupt mapper to
	   pass them through.  I am exploiting my knowledge that
	   cp0_status masks out IP[5]. krw */
	__raw_writeq(K_INT_MAP_I3,
		     IOADDR(A_IMR_REGISTER(0, R_IMR_INTERRUPT_MAP_BASE) +
			    (K_INT_PERF_CNT << 3)));

	/* Initialize address traps (all zero: no address trapping) */
	__raw_writeq(0, IOADDR(A_ADDR_TRAP_UP_0));
	__raw_writeq(0, IOADDR(A_ADDR_TRAP_UP_1));
	__raw_writeq(0, IOADDR(A_ADDR_TRAP_UP_2));
	__raw_writeq(0, IOADDR(A_ADDR_TRAP_UP_3));

	__raw_writeq(0, IOADDR(A_ADDR_TRAP_DOWN_0));
	__raw_writeq(0, IOADDR(A_ADDR_TRAP_DOWN_1));
	__raw_writeq(0, IOADDR(A_ADDR_TRAP_DOWN_2));
	__raw_writeq(0, IOADDR(A_ADDR_TRAP_DOWN_3));

	__raw_writeq(0, IOADDR(A_ADDR_TRAP_CFG_0));
	__raw_writeq(0, IOADDR(A_ADDR_TRAP_CFG_1));
	__raw_writeq(0, IOADDR(A_ADDR_TRAP_CFG_2));
	__raw_writeq(0, IOADDR(A_ADDR_TRAP_CFG_3));

	/* Initialize Trace Event 0-7: only event 0 (interrupt) is used */
	// when interrupt
	__raw_writeq(M_SCD_TREVT_INTERRUPT, IOADDR(A_SCD_TRACE_EVENT_0));
	__raw_writeq(0, IOADDR(A_SCD_TRACE_EVENT_1));
	__raw_writeq(0, IOADDR(A_SCD_TRACE_EVENT_2));
	__raw_writeq(0, IOADDR(A_SCD_TRACE_EVENT_3));
	__raw_writeq(0, IOADDR(A_SCD_TRACE_EVENT_4));
	__raw_writeq(0, IOADDR(A_SCD_TRACE_EVENT_5));
	__raw_writeq(0, IOADDR(A_SCD_TRACE_EVENT_6));
	__raw_writeq(0, IOADDR(A_SCD_TRACE_EVENT_7));

	/* Initialize Trace Sequence 0-7 */
	// Start on event 0 (interrupt)
	__raw_writeq(V_SCD_TRSEQ_FUNC_START | 0x0fff,
		     IOADDR(A_SCD_TRACE_SEQUENCE_0));
	// dsamp when d used | asamp when a used
	__raw_writeq(M_SCD_TRSEQ_ASAMPLE | M_SCD_TRSEQ_DSAMPLE |
		     K_SCD_TRSEQ_TRIGGER_ALL,
		     IOADDR(A_SCD_TRACE_SEQUENCE_1));
	__raw_writeq(0, IOADDR(A_SCD_TRACE_SEQUENCE_2));
	__raw_writeq(0, IOADDR(A_SCD_TRACE_SEQUENCE_3));
	__raw_writeq(0, IOADDR(A_SCD_TRACE_SEQUENCE_4));
	__raw_writeq(0, IOADDR(A_SCD_TRACE_SEQUENCE_5));
	__raw_writeq(0, IOADDR(A_SCD_TRACE_SEQUENCE_6));
	__raw_writeq(0, IOADDR(A_SCD_TRACE_SEQUENCE_7));

	/* Now indicate the PERF_CNT interrupt as a trace-relevant interrupt */
	__raw_writeq(1ULL << K_INT_PERF_CNT,
		     IOADDR(A_IMR_REGISTER(0, R_IMR_INTERRUPT_TRACE)));

	arm_tb();

	DBG(printk(DEVNAME ": done starting\n"));

	return 0;
}
/*
 * Stop ZBbus profiling: clear tb_enable, wait (if the trace buffer is
 * armed) for the interrupt handler to observe the disable, disarm,
 * and signal tb_sync; then release both IRQs.  A no-op when profiling
 * is not enabled.  Always returns 0.
 */
int sbprof_zbprof_stop(void)
{
	DEFINE_WAIT(wait);

	DBG(printk(DEVNAME ": stopping\n"));

	if (sbp.tb_enable) {
		sbp.tb_enable = 0;
		/* XXXKW there is a window here where the intr handler
		   may run, see the disable, and do the wake_up before
		   this sleep happens. */
		if (sbp.tb_armed) {
			DBG(printk(DEVNAME ": wait for disarm\n"));
			/* Sleep until sbprof_tb_intr() disarms and wakes
			   tb_sync (subject to the race noted above). */
			prepare_to_wait(&sbp.tb_sync, &wait, TASK_INTERRUPTIBLE);
			schedule();
			finish_wait(&sbp.tb_sync, &wait);
			DBG(printk(DEVNAME ": disarm complete\n"));
		}
		free_irq(K_INT_TRACE_FREEZE, &sbp);
		free_irq(K_INT_PERF_CNT, &sbp);
	}

	DBG(printk(DEVNAME ": done stopping\n"));

	return 0;
}
  235. static int sbprof_tb_open(struct inode *inode, struct file *filp)
  236. {
  237. int minor;
  238. minor = iminor(inode);
  239. if (minor != 0) {
  240. return -ENODEV;
  241. }
  242. if (sbp.open) {
  243. return -EBUSY;
  244. }
  245. memset(&sbp, 0, sizeof(struct sbprof_tb));
  246. sbp.sbprof_tbbuf = vmalloc(MAX_TBSAMPLE_BYTES);
  247. if (!sbp.sbprof_tbbuf) {
  248. return -ENOMEM;
  249. }
  250. memset(sbp.sbprof_tbbuf, 0, MAX_TBSAMPLE_BYTES);
  251. init_waitqueue_head(&sbp.tb_sync);
  252. init_waitqueue_head(&sbp.tb_read);
  253. sbp.open = 1;
  254. return 0;
  255. }
  256. static int sbprof_tb_release(struct inode *inode, struct file *filp)
  257. {
  258. int minor;
  259. minor = iminor(inode);
  260. if (minor != 0 || !sbp.open) {
  261. return -ENODEV;
  262. }
  263. if (sbp.tb_armed || sbp.tb_enable) {
  264. sbprof_zbprof_stop();
  265. }
  266. vfree(sbp.sbprof_tbbuf);
  267. sbp.open = 0;
  268. return 0;
  269. }
  270. static ssize_t sbprof_tb_read(struct file *filp, char *buf,
  271. size_t size, loff_t *offp)
  272. {
  273. int cur_sample, sample_off, cur_count, sample_left;
  274. char *src;
  275. int count = 0;
  276. char *dest = buf;
  277. long cur_off = *offp;
  278. count = 0;
  279. cur_sample = cur_off / TB_SAMPLE_SIZE;
  280. sample_off = cur_off % TB_SAMPLE_SIZE;
  281. sample_left = TB_SAMPLE_SIZE - sample_off;
  282. while (size && (cur_sample < sbp.next_tb_sample)) {
  283. cur_count = size < sample_left ? size : sample_left;
  284. src = (char *)(((long)sbp.sbprof_tbbuf[cur_sample])+sample_off);
  285. copy_to_user(dest, src, cur_count);
  286. DBG(printk(DEVNAME ": read from sample %d, %d bytes\n",
  287. cur_sample, cur_count));
  288. size -= cur_count;
  289. sample_left -= cur_count;
  290. if (!sample_left) {
  291. cur_sample++;
  292. sample_off = 0;
  293. sample_left = TB_SAMPLE_SIZE;
  294. } else {
  295. sample_off += cur_count;
  296. }
  297. cur_off += cur_count;
  298. dest += cur_count;
  299. count += cur_count;
  300. }
  301. *offp = cur_off;
  302. return count;
  303. }
  304. static long sbprof_tb_ioctl(struct file *filp,
  305. unsigned int command,
  306. unsigned long arg)
  307. {
  308. int error = 0;
  309. lock_kernel();
  310. switch (command) {
  311. case SBPROF_ZBSTART:
  312. error = sbprof_zbprof_start(filp);
  313. break;
  314. case SBPROF_ZBSTOP:
  315. error = sbprof_zbprof_stop();
  316. break;
  317. case SBPROF_ZBWAITFULL:
  318. DEFINE_WAIT(wait);
  319. prepare_to_wait(&sbp.tb_read, &wait, TASK_INTERRUPTIBLE);
  320. schedule();
  321. finish_wait(&sbp.tb_read, &wait);
  322. /* XXXKW check if interrupted? */
  323. return put_user(TB_FULL, (int *) arg);
  324. default:
  325. error = -EINVAL;
  326. break;
  327. }
  328. unlock_kernel();
  329. return error;
  330. }
/* File operations for the profiling character device (minor 0 only).
   The same handler serves both native and compat ioctl paths; mmap of
   the sample buffer is deliberately not supported. */
static struct file_operations sbprof_tb_fops = {
	.owner = THIS_MODULE,
	.open = sbprof_tb_open,
	.release = sbprof_tb_release,
	.read = sbprof_tb_read,
	.unlocked_ioctl = sbprof_tb_ioctl,
	.compat_ioctl = sbprof_tb_ioctl,
	.mmap = NULL,
};
  340. static int __init sbprof_tb_init(void)
  341. {
  342. if (register_chrdev(SBPROF_TB_MAJOR, DEVNAME, &sbprof_tb_fops)) {
  343. printk(KERN_WARNING DEVNAME ": initialization failed (dev %d)\n",
  344. SBPROF_TB_MAJOR);
  345. return -EIO;
  346. }
  347. sbp.open = 0;
  348. tb_period = zbbus_mhz * 10000LL;
  349. printk(KERN_INFO DEVNAME ": initialized - tb_period = %lld\n", tb_period);
  350. return 0;
  351. }
/* Module exit: unregister the profiling character device. */
static void __exit sbprof_tb_cleanup(void)
{
	unregister_chrdev(SBPROF_TB_MAJOR, DEVNAME);
}
  356. module_init(sbprof_tb_init);
  357. module_exit(sbprof_tb_cleanup);