  1. /*
  2. * trace_output.c
  3. *
  4. * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
  5. *
  6. */
  7. #include <linux/module.h>
  8. #include <linux/mutex.h>
  9. #include <linux/ftrace.h>
  10. #include "trace_output.h"
  11. /* must be a power of 2 */
  12. #define EVENT_HASHSIZE 128
  13. static DEFINE_MUTEX(trace_event_mutex);
  14. static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;
  15. static int next_event_type = __TRACE_LAST_TYPE + 1;
  16. /**
  17. * trace_seq_printf - sequence printing of trace information
  18. * @s: trace sequence descriptor
  19. * @fmt: printf format string
  20. *
  21. * The tracer may use either sequence operations or its own
  22. * copy to user routines. To simplify formating of a trace
  23. * trace_seq_printf is used to store strings into a special
  24. * buffer (@s). Then the output may be either used by
  25. * the sequencer or pulled into another buffer.
  26. */
  27. int
  28. trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
  29. {
  30. int len = (PAGE_SIZE - 1) - s->len;
  31. va_list ap;
  32. int ret;
  33. if (!len)
  34. return 0;
  35. va_start(ap, fmt);
  36. ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
  37. va_end(ap);
  38. /* If we can't write it all, don't bother writing anything */
  39. if (ret >= len)
  40. return 0;
  41. s->len += ret;
  42. return len;
  43. }
  44. /**
  45. * trace_seq_puts - trace sequence printing of simple string
  46. * @s: trace sequence descriptor
  47. * @str: simple string to record
  48. *
  49. * The tracer may use either the sequence operations or its own
  50. * copy to user routines. This function records a simple string
  51. * into a special buffer (@s) for later retrieval by a sequencer
  52. * or other mechanism.
  53. */
  54. int trace_seq_puts(struct trace_seq *s, const char *str)
  55. {
  56. int len = strlen(str);
  57. if (len > ((PAGE_SIZE - 1) - s->len))
  58. return 0;
  59. memcpy(s->buffer + s->len, str, len);
  60. s->len += len;
  61. return len;
  62. }
  63. int trace_seq_putc(struct trace_seq *s, unsigned char c)
  64. {
  65. if (s->len >= (PAGE_SIZE - 1))
  66. return 0;
  67. s->buffer[s->len++] = c;
  68. return 1;
  69. }
  70. int trace_seq_putmem(struct trace_seq *s, void *mem, size_t len)
  71. {
  72. if (len > ((PAGE_SIZE - 1) - s->len))
  73. return 0;
  74. memcpy(s->buffer + s->len, mem, len);
  75. s->len += len;
  76. return len;
  77. }
  78. int trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len)
  79. {
  80. unsigned char hex[HEX_CHARS];
  81. unsigned char *data = mem;
  82. int i, j;
  83. #ifdef __BIG_ENDIAN
  84. for (i = 0, j = 0; i < len; i++) {
  85. #else
  86. for (i = len-1, j = 0; i >= 0; i--) {
  87. #endif
  88. hex[j++] = hex_asc_hi(data[i]);
  89. hex[j++] = hex_asc_lo(data[i]);
  90. }
  91. hex[j++] = ' ';
  92. return trace_seq_putmem(s, hex, j);
  93. }
  94. int trace_seq_path(struct trace_seq *s, struct path *path)
  95. {
  96. unsigned char *p;
  97. if (s->len >= (PAGE_SIZE - 1))
  98. return 0;
  99. p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
  100. if (!IS_ERR(p)) {
  101. p = mangle_path(s->buffer + s->len, p, "\n");
  102. if (p) {
  103. s->len = p - s->buffer;
  104. return 1;
  105. }
  106. } else {
  107. s->buffer[s->len++] = '?';
  108. return 1;
  109. }
  110. return 0;
  111. }
  112. #ifdef CONFIG_KRETPROBES
  113. static inline const char *kretprobed(const char *name)
  114. {
  115. static const char tramp_name[] = "kretprobe_trampoline";
  116. int size = sizeof(tramp_name);
  117. if (strncmp(tramp_name, name, size) == 0)
  118. return "[unknown/kretprobe'd]";
  119. return name;
  120. }
  121. #else
  122. static inline const char *kretprobed(const char *name)
  123. {
  124. return name;
  125. }
  126. #endif /* CONFIG_KRETPROBES */
  127. static int
  128. seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
  129. {
  130. #ifdef CONFIG_KALLSYMS
  131. char str[KSYM_SYMBOL_LEN];
  132. const char *name;
  133. kallsyms_lookup(address, NULL, NULL, NULL, str);
  134. name = kretprobed(str);
  135. return trace_seq_printf(s, fmt, name);
  136. #endif
  137. return 1;
  138. }
  139. static int
  140. seq_print_sym_offset(struct trace_seq *s, const char *fmt,
  141. unsigned long address)
  142. {
  143. #ifdef CONFIG_KALLSYMS
  144. char str[KSYM_SYMBOL_LEN];
  145. const char *name;
  146. sprint_symbol(str, address);
  147. name = kretprobed(str);
  148. return trace_seq_printf(s, fmt, name);
  149. #endif
  150. return 1;
  151. }
  152. #ifndef CONFIG_64BIT
  153. # define IP_FMT "%08lx"
  154. #else
  155. # define IP_FMT "%016lx"
  156. #endif
  157. int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
  158. unsigned long ip, unsigned long sym_flags)
  159. {
  160. struct file *file = NULL;
  161. unsigned long vmstart = 0;
  162. int ret = 1;
  163. if (mm) {
  164. const struct vm_area_struct *vma;
  165. down_read(&mm->mmap_sem);
  166. vma = find_vma(mm, ip);
  167. if (vma) {
  168. file = vma->vm_file;
  169. vmstart = vma->vm_start;
  170. }
  171. if (file) {
  172. ret = trace_seq_path(s, &file->f_path);
  173. if (ret)
  174. ret = trace_seq_printf(s, "[+0x%lx]",
  175. ip - vmstart);
  176. }
  177. up_read(&mm->mmap_sem);
  178. }
  179. if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file))
  180. ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
  181. return ret;
  182. }
  183. int
  184. seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
  185. unsigned long sym_flags)
  186. {
  187. struct mm_struct *mm = NULL;
  188. int ret = 1;
  189. unsigned int i;
  190. if (trace_flags & TRACE_ITER_SYM_USEROBJ) {
  191. struct task_struct *task;
  192. /*
  193. * we do the lookup on the thread group leader,
  194. * since individual threads might have already quit!
  195. */
  196. rcu_read_lock();
  197. task = find_task_by_vpid(entry->ent.tgid);
  198. if (task)
  199. mm = get_task_mm(task);
  200. rcu_read_unlock();
  201. }
  202. for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
  203. unsigned long ip = entry->caller[i];
  204. if (ip == ULONG_MAX || !ret)
  205. break;
  206. if (i && ret)
  207. ret = trace_seq_puts(s, " <- ");
  208. if (!ip) {
  209. if (ret)
  210. ret = trace_seq_puts(s, "??");
  211. continue;
  212. }
  213. if (!ret)
  214. break;
  215. if (ret)
  216. ret = seq_print_user_ip(s, mm, ip, sym_flags);
  217. }
  218. if (mm)
  219. mmput(mm);
  220. return ret;
  221. }
  222. int
  223. seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
  224. {
  225. int ret;
  226. if (!ip)
  227. return trace_seq_printf(s, "0");
  228. if (sym_flags & TRACE_ITER_SYM_OFFSET)
  229. ret = seq_print_sym_offset(s, "%s", ip);
  230. else
  231. ret = seq_print_sym_short(s, "%s", ip);
  232. if (!ret)
  233. return 0;
  234. if (sym_flags & TRACE_ITER_SYM_ADDR)
  235. ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
  236. return ret;
  237. }
  238. /**
  239. * ftrace_find_event - find a registered event
  240. * @type: the type of event to look for
  241. *
  242. * Returns an event of type @type otherwise NULL
  243. */
  244. struct trace_event *ftrace_find_event(int type)
  245. {
  246. struct trace_event *event;
  247. struct hlist_node *n;
  248. unsigned key;
  249. key = type & (EVENT_HASHSIZE - 1);
  250. hlist_for_each_entry_rcu(event, n, &event_hash[key], node) {
  251. if (event->type == type)
  252. return event;
  253. }
  254. return NULL;
  255. }
/**
 * register_ftrace_event - register output for an event type
 * @event: the event type to register
 *
 * Event types are stored in a hash and this hash is used to
 * find a way to print an event. If the @event->type is set
 * then it will use that type, otherwise it will assign a
 * type to use.
 *
 * If you assign your own type, please make sure it is added
 * to the trace_type enum in trace.h, to avoid collisions
 * with the dynamic types.
 *
 * Returns the event type number or zero on error.
 */
int register_ftrace_event(struct trace_event *event)
{
	unsigned key;
	int ret = 0;

	mutex_lock(&trace_event_mutex);

	/* No type given: hand out the next dynamic type id. */
	if (!event->type)
		event->type = next_event_type++;
	else if (event->type > __TRACE_LAST_TYPE) {
		/*
		 * A caller-chosen id above the reserved range may
		 * collide with dynamic ids; warn but register anyway.
		 */
		printk(KERN_WARNING "Need to add type to trace.h\n");
		WARN_ON(1);
	}

	/* Duplicate type: leave ret == 0 to signal the error. */
	if (ftrace_find_event(event->type))
		goto out;

	key = event->type & (EVENT_HASHSIZE - 1);

	/* RCU publish, paired with the RCU walk in ftrace_find_event(). */
	hlist_add_head_rcu(&event->node, &event_hash[key]);

	ret = event->type;
 out:
	mutex_unlock(&trace_event_mutex);

	return ret;
}
/**
 * unregister_ftrace_event - remove a no longer used event
 * @event: the event to remove
 *
 * Unhashes @event under trace_event_mutex. Always returns 0.
 *
 * NOTE(review): lookups walk the hash with
 * hlist_for_each_entry_rcu() (see ftrace_find_event()), but this
 * removal uses plain hlist_del() and does not wait for an RCU
 * grace period -- confirm no lockless reader can still hold a
 * reference before the event storage is reused.
 */
int unregister_ftrace_event(struct trace_event *event)
{
	mutex_lock(&trace_event_mutex);
	hlist_del(&event->node);
	mutex_unlock(&trace_event_mutex);

	return 0;
}