kmemtrace.c

/*
 * Memory allocator tracing
 *
 * Copyright (C) 2008 Eduard - Gabriel Munteanu
 * Copyright (C) 2008 Pekka Enberg <penberg@cs.helsinki.fi>
 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/dcache.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <trace/kmemtrace.h>

#include "trace.h"
#include "trace_output.h"

/* Select an alternative, minimalistic output instead of the original one */
#define TRACE_KMEM_OPT_MINIMAL	0x1

static struct tracer_opt kmem_opts[] = {
        /* The minimalistic output is disabled by default */
        { TRACER_OPT(kmem_minimalistic, TRACE_KMEM_OPT_MINIMAL) },
        { }
};

static struct tracer_flags kmem_tracer_flags = {
        .val = 0,
        .opts = kmem_opts
};
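
/*
 * Note: kmem_minimalistic is a tracer-specific flag; while the kmemtrace
 * tracer is active it can typically be toggled through ftrace's
 * trace_options interface, which flips TRACE_KMEM_OPT_MINIMAL in
 * kmem_tracer_flags.val and thereby selects the compressed printers below.
 */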

static bool kmem_tracing_enabled __read_mostly;
static struct trace_array *kmemtrace_array;
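
/*
 * Called by the tracing core when "kmemtrace" is selected as the current
 * tracer: reset every possible CPU's ring buffer and allow the probes
 * below to start recording events.
 */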
static int kmem_trace_init(struct trace_array *tr)
{
        int cpu;

        kmemtrace_array = tr;

        for_each_cpu_mask(cpu, cpu_possible_map)
                tracing_reset(tr, cpu);

        kmem_tracing_enabled = true;

        return 0;
}

static void kmem_trace_reset(struct trace_array *tr)
{
        kmem_tracing_enabled = false;
}
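
/*
 * The column header is only emitted for the minimalistic format; the
 * original format further below is self-describing (key/value pairs),
 * so it needs no header.
 */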
static void kmemtrace_headers(struct seq_file *s)
{
        /* Don't need headers for the original kmemtrace output */
        if (!(kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL))
                return;

        seq_printf(s, "#\n");
        seq_printf(s, "# ALLOC TYPE REQ GIVEN FLAGS "
                      " POINTER NODE CALLER\n");
        seq_printf(s, "# FREE | | | | "
                      " | | | |\n");
        seq_printf(s, "# |\n\n");
}

/*
 * The following two functions give the original kmemtrace output, or
 * something close to it; a few details may still be missing.
 */
static enum print_line_t
kmemtrace_print_alloc_original(struct trace_iterator *iter,
                               struct kmemtrace_alloc_entry *entry)
{
        struct trace_seq *s = &iter->seq;
        int ret;

        /* Taken from the old linux/kmemtrace.h */
        ret = trace_seq_printf(s, "type_id %d call_site %lu ptr %lu "
                "bytes_req %lu bytes_alloc %lu gfp_flags %lu node %d\n",
                entry->type_id, entry->call_site, (unsigned long) entry->ptr,
                (unsigned long) entry->bytes_req,
                (unsigned long) entry->bytes_alloc,
                (unsigned long) entry->gfp_flags, entry->node);

        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t
kmemtrace_print_free_original(struct trace_iterator *iter,
                              struct kmemtrace_free_entry *entry)
{
        struct trace_seq *s = &iter->seq;
        int ret;

        /* Taken from the old linux/kmemtrace.h */
        ret = trace_seq_printf(s, "type_id %d call_site %lu ptr %lu\n",
                entry->type_id, entry->call_site, (unsigned long) entry->ptr);

        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

/* The following two functions provide the more minimalistic output */
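/*
 * Each compressed line matches the header printed by kmemtrace_headers():
 * '+' (alloc) or '-' (free), a type letter (K = kmalloc, C = kmem_cache,
 * P = pages), the requested and allocated sizes, the raw GFP flags, the
 * object pointer, the NUMA node and the call site. A line might look
 * roughly like this (illustrative values only):
 *
 *   + K  128  128 000080d0 0xffff88003c4dcb40   -1 __alloc_skb+0x4a/0x160
 */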
static enum print_line_t
kmemtrace_print_alloc_compress(struct trace_iterator *iter,
                               struct kmemtrace_alloc_entry *entry)
{
        struct trace_seq *s = &iter->seq;
        int ret;

        /* Alloc entry */
        ret = trace_seq_printf(s, " + ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Type */
        switch (entry->type_id) {
        case KMEMTRACE_TYPE_KMALLOC:
                ret = trace_seq_printf(s, "K ");
                break;
        case KMEMTRACE_TYPE_CACHE:
                ret = trace_seq_printf(s, "C ");
                break;
        case KMEMTRACE_TYPE_PAGES:
                ret = trace_seq_printf(s, "P ");
                break;
        default:
                ret = trace_seq_printf(s, "? ");
        }

        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Requested */
        ret = trace_seq_printf(s, "%4zu ", entry->bytes_req);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Allocated */
        ret = trace_seq_printf(s, "%4zu ", entry->bytes_alloc);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /*
         * Flags
         * TODO: it would be better to print the names of the GFP flags
         */
        ret = trace_seq_printf(s, "%08x ", entry->gfp_flags);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Pointer to allocated */
        ret = trace_seq_printf(s, "0x%tx ", (ptrdiff_t)entry->ptr);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Node */
        ret = trace_seq_printf(s, "%4d ", entry->node);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Call site */
        ret = seq_print_ip_sym(s, entry->call_site, 0);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        if (!trace_seq_printf(s, "\n"))
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t
kmemtrace_print_free_compress(struct trace_iterator *iter,
                              struct kmemtrace_free_entry *entry)
{
        struct trace_seq *s = &iter->seq;
        int ret;

        /* Free entry */
        ret = trace_seq_printf(s, " - ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Type */
        switch (entry->type_id) {
        case KMEMTRACE_TYPE_KMALLOC:
                ret = trace_seq_printf(s, "K ");
                break;
        case KMEMTRACE_TYPE_CACHE:
                ret = trace_seq_printf(s, "C ");
                break;
        case KMEMTRACE_TYPE_PAGES:
                ret = trace_seq_printf(s, "P ");
                break;
        default:
                ret = trace_seq_printf(s, "? ");
        }

        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Skip requested/allocated/flags */
        ret = trace_seq_printf(s, " ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Pointer to allocated */
        ret = trace_seq_printf(s, "0x%tx ", (ptrdiff_t)entry->ptr);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Skip node */
        ret = trace_seq_printf(s, " ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Call site */
        ret = seq_print_ip_sym(s, entry->call_site, 0);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        if (!trace_seq_printf(s, "\n"))
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}
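
/*
 * Dispatch on the entry type and on the kmem_minimalistic flag.
 * TRACE_TYPE_UNHANDLED hands anything we do not recognise back to the
 * generic output code; TRACE_TYPE_PARTIAL_LINE is returned (by the
 * printers above) when the trace_seq buffer ran out of room.
 */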
static enum print_line_t kmemtrace_print_line(struct trace_iterator *iter)
{
        struct trace_entry *entry = iter->ent;

        switch (entry->type) {
        case TRACE_KMEM_ALLOC: {
                struct kmemtrace_alloc_entry *field;
                trace_assign_type(field, entry);
                if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)
                        return kmemtrace_print_alloc_compress(iter, field);
                else
                        return kmemtrace_print_alloc_original(iter, field);
        }

        case TRACE_KMEM_FREE: {
                struct kmemtrace_free_entry *field;
                trace_assign_type(field, entry);
                if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)
                        return kmemtrace_print_free_compress(iter, field);
                else
                        return kmemtrace_print_free_original(iter, field);
        }

        default:
                return TRACE_TYPE_UNHANDLED;
        }
}
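
/*
 * The two probes below are what the instrumented allocators call into
 * (e.g. the kmalloc/kmem_cache_alloc and kfree/kmem_cache_free wrappers
 * when kmemtrace support is built in). Each probe reserves an event in
 * the per-CPU ring buffer, fills it in (including type_id, which the
 * printers above rely on), commits it and wakes up any blocked reader.
 */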

/* Trace allocations */
void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
                               unsigned long call_site,
                               const void *ptr,
                               size_t bytes_req,
                               size_t bytes_alloc,
                               gfp_t gfp_flags,
                               int node)
{
        struct ring_buffer_event *event;
        struct kmemtrace_alloc_entry *entry;
        struct trace_array *tr = kmemtrace_array;
        unsigned long irq_flags;

        if (!kmem_tracing_enabled)
                return;

        event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
                                         &irq_flags);
        if (!event)
                return;

        entry = ring_buffer_event_data(event);
        tracing_generic_entry_update(&entry->ent, 0, 0);
        entry->ent.type = TRACE_KMEM_ALLOC;
        entry->type_id = type_id;       /* needed by the alloc printers */
        entry->call_site = call_site;
        entry->ptr = ptr;
        entry->bytes_req = bytes_req;
        entry->bytes_alloc = bytes_alloc;
        entry->gfp_flags = gfp_flags;
        entry->node = node;

        ring_buffer_unlock_commit(tr->buffer, event, irq_flags);

        trace_wake_up();
}

void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
                         unsigned long call_site,
                         const void *ptr)
{
        struct ring_buffer_event *event;
        struct kmemtrace_free_entry *entry;
        struct trace_array *tr = kmemtrace_array;
        unsigned long irq_flags;

        if (!kmem_tracing_enabled)
                return;

        event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
                                         &irq_flags);
        if (!event)
                return;

        entry = ring_buffer_event_data(event);
        tracing_generic_entry_update(&entry->ent, 0, 0);
        entry->ent.type = TRACE_KMEM_FREE;
        entry->type_id = type_id;
        entry->call_site = call_site;
        entry->ptr = ptr;

        ring_buffer_unlock_commit(tr->buffer, event, irq_flags);

        trace_wake_up();
}
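
/*
 * Registering the tracer makes "kmemtrace" show up in the debugfs
 * available_tracers file; selecting it as the current tracer calls
 * kmem_trace_init() above and routes output through
 * kmemtrace_print_line().
 */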
static struct tracer kmem_tracer __read_mostly = {
        .name		= "kmemtrace",
        .init		= kmem_trace_init,
        .reset		= kmem_trace_reset,
        .print_line	= kmemtrace_print_line,
        .print_header	= kmemtrace_headers,
        .flags		= &kmem_tracer_flags
};

static int __init init_kmem_tracer(void)
{
        return register_tracer(&kmem_tracer);
}

device_initcall(init_kmem_tracer);