trace_mmiotrace.c

/*
 * Memory mapped I/O tracing
 *
 * Copyright (C) 2008 Pekka Paalanen <pq@iki.fi>
 */

#define DEBUG 1

#include <linux/kernel.h>
#include <linux/mmiotrace.h>
#include <linux/pci.h>

#include "trace.h"
struct header_iter {
        struct pci_dev *dev;
};

static struct trace_array *mmio_trace_array;
static bool overrun_detected;
static unsigned long prev_overruns;
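
/*
 * Clear the overrun bookkeeping and reset the trace buffer of every
 * online CPU so that a new trace starts from a clean state.
 */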
static void mmio_reset_data(struct trace_array *tr)
{
        int cpu;

        overrun_detected = false;
        prev_overruns = 0;
        tr->time_start = ftrace_now(tr->cpu);

        for_each_online_cpu(cpu)
                tracing_reset(tr, cpu);
}
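
/*
 * Tracer lifecycle callbacks. tr->ctrl reflects whether tracing is
 * currently enabled; enable_mmiotrace() and disable_mmiotrace() arm and
 * disarm the mmiotrace machinery accordingly.
 */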
static void mmio_trace_init(struct trace_array *tr)
{
        pr_debug("in %s\n", __func__);
        mmio_trace_array = tr;
        if (tr->ctrl) {
                mmio_reset_data(tr);
                enable_mmiotrace();
        }
}

static void mmio_trace_reset(struct trace_array *tr)
{
        pr_debug("in %s\n", __func__);
        if (tr->ctrl)
                disable_mmiotrace();
        mmio_reset_data(tr);
        mmio_trace_array = NULL;
}

static void mmio_trace_ctrl_update(struct trace_array *tr)
{
        pr_debug("in %s\n", __func__);
        if (tr->ctrl) {
                mmio_reset_data(tr);
                enable_mmiotrace();
        } else {
                disable_mmiotrace();
        }
}
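
/*
 * Emit one PCIDEV header line for @dev: bus number and devfn, vendor and
 * device IDs, IRQ, the base address of each of the seven resources (with
 * the region flag bits folded in), each resource's size, and the name of
 * the bound driver, if any.
 */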
static int mmio_print_pcidev(struct trace_seq *s, const struct pci_dev *dev)
{
        int ret = 0;
        int i;
        resource_size_t start, end;
        const struct pci_driver *drv = pci_dev_driver(dev);

        /* XXX: incomplete checks for trace_seq_printf() return value */
        ret += trace_seq_printf(s, "PCIDEV %02x%02x %04x%04x %x",
                                dev->bus->number, dev->devfn,
                                dev->vendor, dev->device, dev->irq);
        /*
         * XXX: is pci_resource_to_user() appropriate, since we are
         * supposed to interpret the __ioremap() phys_addr argument based on
         * these printed values?
         */
        for (i = 0; i < 7; i++) {
                pci_resource_to_user(dev, i, &dev->resource[i], &start, &end);
                ret += trace_seq_printf(s, " %llx",
                        (unsigned long long)(start |
                        (dev->resource[i].flags & PCI_REGION_FLAG_MASK)));
        }
        for (i = 0; i < 7; i++) {
                pci_resource_to_user(dev, i, &dev->resource[i], &start, &end);
                ret += trace_seq_printf(s, " %llx",
                        dev->resource[i].start < dev->resource[i].end ?
                        (unsigned long long)(end - start) + 1 : 0);
        }
        if (drv)
                ret += trace_seq_printf(s, " %s\n", drv->name);
        else
                ret += trace_seq_printf(s, " \n");
        return ret;
}

static void destroy_header_iter(struct header_iter *hiter)
{
        if (!hiter)
                return;
        pci_dev_put(hiter->dev);
        kfree(hiter);
}
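
/*
 * Opening the pipe emits the VERSION header line and starts the walk over
 * all PCI devices; one PCIDEV line is then printed per subsequent read.
 */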
static void mmio_pipe_open(struct trace_iterator *iter)
{
        struct header_iter *hiter;
        struct trace_seq *s = &iter->seq;

        trace_seq_printf(s, "VERSION 20070824\n");

        hiter = kzalloc(sizeof(*hiter), GFP_KERNEL);
        if (!hiter)
                return;

        hiter->dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, NULL);
        iter->private = hiter;
}

/* XXX: This is not called when the pipe is closed! */
static void mmio_close(struct trace_iterator *iter)
{
        struct header_iter *hiter = iter->private;
        destroy_header_iter(hiter);
        iter->private = NULL;
}
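
/*
 * Return how many events the ring buffer has dropped since the previous
 * call, keeping the running total in prev_overruns.
 */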
static unsigned long count_overruns(struct trace_iterator *iter)
{
        unsigned long cnt = 0;
        unsigned long over = ring_buffer_overruns(iter->tr->buffer);

        if (over > prev_overruns)
                cnt = over - prev_overruns;
        prev_overruns = over;
        return cnt;
}
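
/*
 * The .read callback for trace_pipe. Lost events are reported first with
 * a MARK line. Otherwise one PCIDEV header line is emitted per read until
 * the device walk finishes; returning 0 after that hands the read over to
 * the core tracer's normal event output.
 */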
static ssize_t mmio_read(struct trace_iterator *iter, struct file *filp,
                         char __user *ubuf, size_t cnt, loff_t *ppos)
{
        ssize_t ret;
        struct header_iter *hiter = iter->private;
        struct trace_seq *s = &iter->seq;
        unsigned long n;

        n = count_overruns(iter);
        if (n) {
                /* XXX: This is later than where events were lost. */
                trace_seq_printf(s, "MARK 0.000000 Lost %lu events.\n", n);
                if (!overrun_detected)
                        pr_warning("mmiotrace has lost events.\n");
                overrun_detected = true;
                goto print_out;
        }

        if (!hiter)
                return 0;

        mmio_print_pcidev(s, hiter->dev);
        hiter->dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, hiter->dev);
        if (!hiter->dev) {
                destroy_header_iter(hiter);
                iter->private = NULL;
        }

print_out:
        ret = trace_seq_to_user(s, ubuf, cnt);
        return (ret == -EBUSY) ? 0 : ret;
}
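
/*
 * Format one MMIO access as an "R", "W" or "UNKNOWN" line: access width,
 * timestamp in seconds, map id, physical address, value and program
 * counter of the faulting instruction.
 */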
static enum print_line_t mmio_print_rw(struct trace_iterator *iter)
{
        struct trace_entry *entry = iter->ent;
        struct trace_mmiotrace_rw *field;
        struct mmiotrace_rw *rw;
        struct trace_seq *s = &iter->seq;
        unsigned long long t = ns2usecs(iter->ts);
        unsigned long usec_rem = do_div(t, 1000000ULL);
        unsigned long secs = (unsigned long)t;
        int ret = 1;

        trace_assign_type(field, entry);
        rw = &field->rw;

        switch (rw->opcode) {
        case MMIO_READ:
                ret = trace_seq_printf(s,
                        "R %d %lu.%06lu %d 0x%llx 0x%lx 0x%lx %d\n",
                        rw->width, secs, usec_rem, rw->map_id,
                        (unsigned long long)rw->phys,
                        rw->value, rw->pc, 0);
                break;
        case MMIO_WRITE:
                ret = trace_seq_printf(s,
                        "W %d %lu.%06lu %d 0x%llx 0x%lx 0x%lx %d\n",
                        rw->width, secs, usec_rem, rw->map_id,
                        (unsigned long long)rw->phys,
                        rw->value, rw->pc, 0);
                break;
        case MMIO_UNKNOWN_OP:
                ret = trace_seq_printf(s,
                        "UNKNOWN %lu.%06lu %d 0x%llx %02x,%02x,%02x 0x%lx %d\n",
                        secs, usec_rem, rw->map_id,
                        (unsigned long long)rw->phys,
                        (rw->value >> 16) & 0xff, (rw->value >> 8) & 0xff,
                        (rw->value >> 0) & 0xff, rw->pc, 0);
                break;
        default:
                ret = trace_seq_printf(s, "rw what?\n");
                break;
        }
        if (ret)
                return TRACE_TYPE_HANDLED;
        return TRACE_TYPE_PARTIAL_LINE;
}
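
/*
 * Format an ioremap event as a MAP line and an iounmap event as an UNMAP
 * line, keyed by the map id that MMIO access lines refer back to.
 */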
static enum print_line_t mmio_print_map(struct trace_iterator *iter)
{
        struct trace_entry *entry = iter->ent;
        struct trace_mmiotrace_map *field;
        struct mmiotrace_map *m;
        struct trace_seq *s = &iter->seq;
        unsigned long long t = ns2usecs(iter->ts);
        unsigned long usec_rem = do_div(t, 1000000ULL);
        unsigned long secs = (unsigned long)t;
        int ret;

        trace_assign_type(field, entry);
        m = &field->map;

        switch (m->opcode) {
        case MMIO_PROBE:
                ret = trace_seq_printf(s,
                        "MAP %lu.%06lu %d 0x%llx 0x%lx 0x%lx 0x%lx %d\n",
                        secs, usec_rem, m->map_id,
                        (unsigned long long)m->phys, m->virt, m->len,
                        0UL, 0);
                break;
        case MMIO_UNPROBE:
                ret = trace_seq_printf(s,
                        "UNMAP %lu.%06lu %d 0x%lx %d\n",
                        secs, usec_rem, m->map_id, 0UL, 0);
                break;
        default:
                ret = trace_seq_printf(s, "map what?\n");
                break;
        }
        if (ret)
                return TRACE_TYPE_HANDLED;
        return TRACE_TYPE_PARTIAL_LINE;
}
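
/*
 * Format a user-injected marker (a TRACE_PRINT entry) as a MARK line.
 */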
static enum print_line_t mmio_print_mark(struct trace_iterator *iter)
{
        struct trace_entry *entry = iter->ent;
        struct print_entry *print = (struct print_entry *)entry;
        const char *msg = print->buf;
        struct trace_seq *s = &iter->seq;
        unsigned long long t = ns2usecs(iter->ts);
        unsigned long usec_rem = do_div(t, 1000000ULL);
        unsigned long secs = (unsigned long)t;
        int ret;

        /* The trailing newline must be in the message. */
        ret = trace_seq_printf(s, "MARK %lu.%06lu %s", secs, usec_rem, msg);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        if (entry->flags & TRACE_FLAG_CONT)
                trace_seq_print_cont(s, iter);

        return TRACE_TYPE_HANDLED;
}
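
/* Dispatch an entry to the printer matching its type. */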
static enum print_line_t mmio_print_line(struct trace_iterator *iter)
{
        switch (iter->ent->type) {
        case TRACE_MMIO_RW:
                return mmio_print_rw(iter);
        case TRACE_MMIO_MAP:
                return mmio_print_map(iter);
        case TRACE_PRINT:
                return mmio_print_mark(iter);
        default:
                return TRACE_TYPE_HANDLED; /* ignore unknown entries */
        }
}
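
/* The tracer operations registered with the ftrace core below. */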
static struct tracer mmio_tracer __read_mostly =
{
        .name           = "mmiotrace",
        .init           = mmio_trace_init,
        .reset          = mmio_trace_reset,
        .pipe_open      = mmio_pipe_open,
        .close          = mmio_close,
        .read           = mmio_read,
        .ctrl_update    = mmio_trace_ctrl_update,
        .print_line     = mmio_print_line,
};

__init static int init_mmio_trace(void)
{
        return register_tracer(&mmio_tracer);
}
device_initcall(init_mmio_trace);
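
/*
 * Reserve space for one event in the ring buffer, fill in a TRACE_MMIO_RW
 * entry, commit it and wake up any waiting readers.
 */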
static void __trace_mmiotrace_rw(struct trace_array *tr,
                                 struct trace_array_cpu *data,
                                 struct mmiotrace_rw *rw)
{
        struct ring_buffer_event *event;
        struct trace_mmiotrace_rw *entry;
        unsigned long irq_flags;

        event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
                                         &irq_flags);
        if (!event)
                return;
        entry = ring_buffer_event_data(event);
        tracing_generic_entry_update(&entry->ent, 0, preempt_count());
        entry->ent.type = TRACE_MMIO_RW;
        entry->rw = *rw;
        ring_buffer_unlock_commit(tr->buffer, event, irq_flags);

        trace_wake_up();
}
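
/*
 * Record one MMIO read or write; called from the mmiotrace fault handling
 * path, which presumably runs with preemption already disabled, so the
 * per-CPU lookup here needs no extra protection.
 */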
void mmio_trace_rw(struct mmiotrace_rw *rw)
{
        struct trace_array *tr = mmio_trace_array;
        struct trace_array_cpu *data = tr->data[smp_processor_id()];
        __trace_mmiotrace_rw(tr, data, rw);
}

static void __trace_mmiotrace_map(struct trace_array *tr,
                                  struct trace_array_cpu *data,
                                  struct mmiotrace_map *map)
{
        struct ring_buffer_event *event;
        struct trace_mmiotrace_map *entry;
        unsigned long irq_flags;

        event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
                                         &irq_flags);
        if (!event)
                return;
        entry = ring_buffer_event_data(event);
        tracing_generic_entry_update(&entry->ent, 0, preempt_count());
        entry->ent.type = TRACE_MMIO_MAP;
        entry->map = *map;
        ring_buffer_unlock_commit(tr->buffer, event, irq_flags);

        trace_wake_up();
}
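
/*
 * Record a mapping event. Unlike mmio_trace_rw(), this may be called with
 * preemption enabled, so it is disabled around the per-CPU data lookup.
 */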
void mmio_trace_mapping(struct mmiotrace_map *map)
{
        struct trace_array *tr = mmio_trace_array;
        struct trace_array_cpu *data;

        preempt_disable();
        data = tr->data[smp_processor_id()];
        __trace_mmiotrace_map(tr, data, map);
        preempt_enable();
}
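
/* Back end for user-injected markers: record the message via trace_vprintk(). */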
int mmio_trace_printk(const char *fmt, va_list args)
{
        return trace_vprintk(0, fmt, args);
}