trace_mmiotrace.c

/*
 * Memory mapped I/O tracing
 *
 * Copyright (C) 2008 Pekka Paalanen <pq@iki.fi>
 */

#define DEBUG 1

#include <linux/kernel.h>
#include <linux/mmiotrace.h>
#include <linux/pci.h>

#include "trace.h"
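
/*
 * State for streaming the PCI device header: the trace pipe read path
 * walks the global PCI device list, one device per read() call.
 */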
struct header_iter {
	struct pci_dev *dev;
};

static struct trace_array *mmio_trace_array;
static bool overrun_detected;

static void mmio_reset_data(struct trace_array *tr)
{
	int cpu;

	overrun_detected = false;
	tr->time_start = ftrace_now(tr->cpu);

	for_each_online_cpu(cpu)
		tracing_reset(tr, cpu);
}
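
/*
 * Tracer lifecycle: init arms the mmiotrace page-fault hooks via
 * enable_mmiotrace() and reset disarms them, while start merely clears
 * the buffers so a stopped trace can resume without re-arming.
 */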
static int mmio_trace_init(struct trace_array *tr)
{
	pr_debug("in %s\n", __func__);
	mmio_trace_array = tr;

	mmio_reset_data(tr);
	enable_mmiotrace();
	return 0;
}

static void mmio_trace_reset(struct trace_array *tr)
{
	pr_debug("in %s\n", __func__);

	disable_mmiotrace();
	mmio_reset_data(tr);
	mmio_trace_array = NULL;
}

static void mmio_trace_start(struct trace_array *tr)
{
	pr_debug("in %s\n", __func__);
	mmio_reset_data(tr);
}
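
/*
 * Emit one "PCIDEV" header line per device so the userspace decoder can
 * relate ioremap() physical addresses back to hardware: bus and devfn,
 * vendor and device id, irq, then base address and size for each of the
 * seven PCI resources (six BARs plus the expansion ROM), and finally the
 * name of the bound driver, if any.
 */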
static int mmio_print_pcidev(struct trace_seq *s, const struct pci_dev *dev)
{
	int ret = 0;
	int i;
	resource_size_t start, end;
	const struct pci_driver *drv = pci_dev_driver(dev);

	/* XXX: incomplete checks for trace_seq_printf() return value */
	ret += trace_seq_printf(s, "PCIDEV %02x%02x %04x%04x %x",
				dev->bus->number, dev->devfn,
				dev->vendor, dev->device, dev->irq);
	/*
	 * XXX: is pci_resource_to_user() appropriate, since we are
	 * supposed to interpret the __ioremap() phys_addr argument based on
	 * these printed values?
	 */
	for (i = 0; i < 7; i++) {
		pci_resource_to_user(dev, i, &dev->resource[i], &start, &end);
		ret += trace_seq_printf(s, " %llx",
			(unsigned long long)(start |
			(dev->resource[i].flags & PCI_REGION_FLAG_MASK)));
	}
	for (i = 0; i < 7; i++) {
		pci_resource_to_user(dev, i, &dev->resource[i], &start, &end);
		ret += trace_seq_printf(s, " %llx",
			dev->resource[i].start < dev->resource[i].end ?
			(unsigned long long)(end - start) + 1 : 0);
	}
	if (drv)
		ret += trace_seq_printf(s, " %s\n", drv->name);
	else
		ret += trace_seq_printf(s, " \n");
	return ret;
}

static void destroy_header_iter(struct header_iter *hiter)
{
	if (!hiter)
		return;
	pci_dev_put(hiter->dev);
	kfree(hiter);
}
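
/*
 * Runs when the trace pipe is opened: print the header version string
 * the userspace decoder expects and grab a reference to the first PCI
 * device so mmio_read() can stream the PCIDEV lines one at a time.
 */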
static void mmio_pipe_open(struct trace_iterator *iter)
{
	struct header_iter *hiter;
	struct trace_seq *s = &iter->seq;

	trace_seq_printf(s, "VERSION 20070824\n");

	hiter = kzalloc(sizeof(*hiter), GFP_KERNEL);
	if (!hiter)
		return;

	hiter->dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, NULL);
	iter->private = hiter;
}

/* XXX: This is not called when the pipe is closed! */
static void mmio_close(struct trace_iterator *iter)
{
	struct header_iter *hiter = iter->private;
	destroy_header_iter(hiter);
	iter->private = NULL;
}
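
/*
 * Note: overrun accounting is stubbed out; the iter->overrun[] counters
 * used below are gone (presumably lost in the ring buffer conversion),
 * so this always returns 0 until the FIXME is resolved.
 */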
static unsigned long count_overruns(struct trace_iterator *iter)
{
	int cpu;
	unsigned long cnt = 0;

/* FIXME: */
#if 0
	for_each_online_cpu(cpu) {
		cnt += iter->overrun[cpu];
		iter->overrun[cpu] = 0;
	}
#endif
	(void)cpu;
	return cnt;
}
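
/*
 * The .read callback for the trace pipe: report lost events first, then
 * emit one PCIDEV header line per call until the device walk finishes.
 * Returning 0 once the header is done lets the generic pipe read code
 * take over and stream the actual trace entries.
 */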
static ssize_t mmio_read(struct trace_iterator *iter, struct file *filp,
			 char __user *ubuf, size_t cnt, loff_t *ppos)
{
	ssize_t ret;
	struct header_iter *hiter = iter->private;
	struct trace_seq *s = &iter->seq;
	unsigned long n;

	n = count_overruns(iter);
	if (n) {
		/* XXX: This is later than where events were lost. */
		trace_seq_printf(s, "MARK 0.000000 Lost %lu events.\n", n);
		if (!overrun_detected)
			pr_warning("mmiotrace has lost events.\n");
		overrun_detected = true;
		goto print_out;
	}

	if (!hiter)
		return 0;

	mmio_print_pcidev(s, hiter->dev);
	hiter->dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, hiter->dev);

	if (!hiter->dev) {
		destroy_header_iter(hiter);
		iter->private = NULL;
	}

print_out:
	ret = trace_seq_to_user(s, ubuf, cnt);
	return (ret == -EBUSY) ? 0 : ret;
}
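
/*
 * Format one MMIO access as a single text line, e.g. (illustrative):
 *
 *	R 4 0.259894 1 0xfb000000 0x12345678 0xc01234ab 0
 *
 * i.e. opcode, width, timestamp, map id, physical address, value and pc.
 * The trailing 0 is a constant filler field.
 */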
static enum print_line_t mmio_print_rw(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;
	struct trace_mmiotrace_rw *field;
	struct mmiotrace_rw *rw;
	struct trace_seq *s = &iter->seq;
	unsigned long long t = ns2usecs(iter->ts);
	unsigned long usec_rem = do_div(t, 1000000ULL);
	unsigned long secs = (unsigned long)t;
	int ret = 1;

	trace_assign_type(field, entry);
	rw = &field->rw;

	switch (rw->opcode) {
	case MMIO_READ:
		ret = trace_seq_printf(s,
			"R %d %lu.%06lu %d 0x%llx 0x%lx 0x%lx %d\n",
			rw->width, secs, usec_rem, rw->map_id,
			(unsigned long long)rw->phys,
			rw->value, rw->pc, 0);
		break;
	case MMIO_WRITE:
		ret = trace_seq_printf(s,
			"W %d %lu.%06lu %d 0x%llx 0x%lx 0x%lx %d\n",
			rw->width, secs, usec_rem, rw->map_id,
			(unsigned long long)rw->phys,
			rw->value, rw->pc, 0);
		break;
	case MMIO_UNKNOWN_OP:
		ret = trace_seq_printf(s,
			"UNKNOWN %lu.%06lu %d 0x%llx %02lx,%02lx,%02lx 0x%lx %d\n",
			secs, usec_rem, rw->map_id,
			(unsigned long long)rw->phys,
			(rw->value >> 16) & 0xff, (rw->value >> 8) & 0xff,
			(rw->value >> 0) & 0xff, rw->pc, 0);
		break;
	default:
		ret = trace_seq_printf(s, "rw what?\n");
		break;
	}
	if (ret)
		return TRACE_TYPE_HANDLED;
	return TRACE_TYPE_PARTIAL_LINE;
}
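
/*
 * Format ioremap()/iounmap() events as MAP/UNMAP lines, which tell the
 * decoder what physical range and virtual mapping each map id covers.
 */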
static enum print_line_t mmio_print_map(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;
	struct trace_mmiotrace_map *field;
	struct mmiotrace_map *m;
	struct trace_seq *s = &iter->seq;
	unsigned long long t = ns2usecs(iter->ts);
	unsigned long usec_rem = do_div(t, 1000000ULL);
	unsigned long secs = (unsigned long)t;
	int ret;

	trace_assign_type(field, entry);
	m = &field->map;

	switch (m->opcode) {
	case MMIO_PROBE:
		ret = trace_seq_printf(s,
			"MAP %lu.%06lu %d 0x%llx 0x%lx 0x%lx 0x%lx %d\n",
			secs, usec_rem, m->map_id,
			(unsigned long long)m->phys, m->virt, m->len,
			0UL, 0);
		break;
	case MMIO_UNPROBE:
		ret = trace_seq_printf(s,
			"UNMAP %lu.%06lu %d 0x%lx %d\n",
			secs, usec_rem, m->map_id, 0UL, 0);
		break;
	default:
		ret = trace_seq_printf(s, "map what?\n");
		break;
	}
	if (ret)
		return TRACE_TYPE_HANDLED;
	return TRACE_TYPE_PARTIAL_LINE;
}
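
/*
 * Markers injected via mmio_trace_printk() arrive as plain TRACE_PRINT
 * entries and are emitted as MARK lines.
 */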
static enum print_line_t mmio_print_mark(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;
	struct print_entry *print = (struct print_entry *)entry;
	const char *msg = print->buf;
	struct trace_seq *s = &iter->seq;
	unsigned long long t = ns2usecs(iter->ts);
	unsigned long usec_rem = do_div(t, 1000000ULL);
	unsigned long secs = (unsigned long)t;
	int ret;

	/* The trailing newline must be in the message. */
	ret = trace_seq_printf(s, "MARK %lu.%06lu %s", secs, usec_rem, msg);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	if (entry->flags & TRACE_FLAG_CONT)
		trace_seq_print_cont(s, iter);

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t mmio_print_line(struct trace_iterator *iter)
{
	switch (iter->ent->type) {
	case TRACE_MMIO_RW:
		return mmio_print_rw(iter);
	case TRACE_MMIO_MAP:
		return mmio_print_map(iter);
	case TRACE_PRINT:
		return mmio_print_mark(iter);
	default:
		return TRACE_TYPE_HANDLED; /* ignore unknown entries */
	}
}
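
/*
 * Typical usage from userspace, sketched here assuming debugfs is
 * mounted at /sys/kernel/debug (see the mmiotrace documentation):
 *
 *	echo mmiotrace > /sys/kernel/debug/tracing/current_tracer
 *	cat /sys/kernel/debug/tracing/trace_pipe > mydump.txt &
 *	... poke the hardware under test ...
 *	echo nop > /sys/kernel/debug/tracing/current_tracer
 */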
static struct tracer mmio_tracer __read_mostly =
{
	.name		= "mmiotrace",
	.init		= mmio_trace_init,
	.reset		= mmio_trace_reset,
	.start		= mmio_trace_start,
	.pipe_open	= mmio_pipe_open,
	.close		= mmio_close,
	.read		= mmio_read,
	.print_line	= mmio_print_line,
};

__init static int init_mmio_trace(void)
{
	return register_tracer(&mmio_tracer);
}
device_initcall(init_mmio_trace);
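
/*
 * Record one MMIO access in the ring buffer using the ftrace ring
 * buffer's reserve/fill/commit pattern: reserve room for the entry
 * (saving irq flags), fill it in, commit it, and wake up any readers
 * sleeping on the trace pipe.
 */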
static void __trace_mmiotrace_rw(struct trace_array *tr,
				 struct trace_array_cpu *data,
				 struct mmiotrace_rw *rw)
{
	struct ring_buffer_event *event;
	struct trace_mmiotrace_rw *entry;
	unsigned long irq_flags;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, 0, preempt_count());
	entry->ent.type = TRACE_MMIO_RW;
	entry->rw = *rw;
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);

	trace_wake_up();
}
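
/*
 * Entry point from the kmmio fault machinery. Note: unlike
 * mmio_trace_mapping() below, this does not disable preemption itself;
 * it appears to rely on the caller already running in atomic context
 * (the page fault handler), so smp_processor_id() stays stable.
 */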
void mmio_trace_rw(struct mmiotrace_rw *rw)
{
	struct trace_array *tr = mmio_trace_array;
	struct trace_array_cpu *data = tr->data[smp_processor_id()];
	__trace_mmiotrace_rw(tr, data, rw);
}

static void __trace_mmiotrace_map(struct trace_array *tr,
				  struct trace_array_cpu *data,
				  struct mmiotrace_map *map)
{
	struct ring_buffer_event *event;
	struct trace_mmiotrace_map *entry;
	unsigned long irq_flags;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, 0, preempt_count());
	entry->ent.type = TRACE_MMIO_MAP;
	entry->map = *map;
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);

	trace_wake_up();
}
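
/*
 * Map/unmap events can come from preemptible context (e.g. ioremap), so
 * preemption is disabled around the per-cpu lookup and the buffer write.
 */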
void mmio_trace_mapping(struct mmiotrace_map *map)
{
	struct trace_array *tr = mmio_trace_array;
	struct trace_array_cpu *data;

	preempt_disable();
	data = tr->data[smp_processor_id()];
	__trace_mmiotrace_map(tr, data, map);
	preempt_enable();
}
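
/* Feed mmiotrace markers into the trace as TRACE_PRINT entries. */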
int mmio_trace_printk(const char *fmt, va_list args)
{
	return trace_vprintk(0, fmt, args);
}