/*
 * Memory allocator tracing
 *
 * Copyright (C) 2008 Eduard - Gabriel Munteanu
 * Copyright (C) 2008 Pekka Enberg <penberg@cs.helsinki.fi>
 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/dcache.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/tracepoint.h>
#include <trace/kmemtrace.h>

#include "trace.h"
#include "trace_output.h"
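
/*
 * kmemtrace hooks the kmalloc/kmem_cache allocator tracepoints into
 * ftrace: every allocation and free event is copied into the tracing
 * ring buffer and rendered later in either the original kmemtrace
 * text format or a compact one-line format, selected through the
 * kmem_minimalistic trace option below.
 */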

/* Select an alternative, minimalistic output instead of the original one */
#define TRACE_KMEM_OPT_MINIMAL	0x1

static struct tracer_opt kmem_opts[] = {
	/* The minimalistic output is disabled by default */
	{ TRACER_OPT(kmem_minimalistic, TRACE_KMEM_OPT_MINIMAL) },
	{ }
};

static struct tracer_flags kmem_tracer_flags = {
	.val	= 0,
	.opts	= kmem_opts
};

static struct trace_array *kmemtrace_array;
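
/*
 * Both ring-buffer writers below follow the same pattern: reserve an
 * event slot, fill in the entry, commit it, then wake up any readers
 * blocked on the trace pipe.
 */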
/* Trace allocations */
static inline void kmemtrace_alloc(enum kmemtrace_type_id type_id,
				   unsigned long call_site,
				   const void *ptr,
				   size_t bytes_req,
				   size_t bytes_alloc,
				   gfp_t gfp_flags,
				   int node)
{
	struct ring_buffer_event *event;
	struct kmemtrace_alloc_entry *entry;
	struct trace_array *tr = kmemtrace_array;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, 0, 0);
	entry->ent.type = TRACE_KMEM_ALLOC;
	entry->type_id = type_id;
	entry->call_site = call_site;
	entry->ptr = ptr;
	entry->bytes_req = bytes_req;
	entry->bytes_alloc = bytes_alloc;
	entry->gfp_flags = gfp_flags;
	entry->node = node;
	ring_buffer_unlock_commit(tr->buffer, event);

	trace_wake_up();
}
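
/* Trace frees */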
static inline void kmemtrace_free(enum kmemtrace_type_id type_id,
				  unsigned long call_site,
				  const void *ptr)
{
	struct ring_buffer_event *event;
	struct kmemtrace_free_entry *entry;
	struct trace_array *tr = kmemtrace_array;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, 0, 0);
	entry->ent.type = TRACE_KMEM_FREE;
	entry->type_id = type_id;
	entry->call_site = call_site;
	entry->ptr = ptr;
	ring_buffer_unlock_commit(tr->buffer, event);

	trace_wake_up();
}
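
/*
 * Tracepoint probes: thin wrappers mapping each allocator tracepoint
 * onto the common writers above. The variants without a node argument
 * pass -1, meaning no specific NUMA node was requested.
 */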
static void kmemtrace_kmalloc(unsigned long call_site,
			      const void *ptr,
			      size_t bytes_req,
			      size_t bytes_alloc,
			      gfp_t gfp_flags)
{
	kmemtrace_alloc(KMEMTRACE_TYPE_KMALLOC, call_site, ptr,
			bytes_req, bytes_alloc, gfp_flags, -1);
}

static void kmemtrace_kmem_cache_alloc(unsigned long call_site,
				       const void *ptr,
				       size_t bytes_req,
				       size_t bytes_alloc,
				       gfp_t gfp_flags)
{
	kmemtrace_alloc(KMEMTRACE_TYPE_CACHE, call_site, ptr,
			bytes_req, bytes_alloc, gfp_flags, -1);
}

static void kmemtrace_kmalloc_node(unsigned long call_site,
				   const void *ptr,
				   size_t bytes_req,
				   size_t bytes_alloc,
				   gfp_t gfp_flags,
				   int node)
{
	kmemtrace_alloc(KMEMTRACE_TYPE_KMALLOC, call_site, ptr,
			bytes_req, bytes_alloc, gfp_flags, node);
}

static void kmemtrace_kmem_cache_alloc_node(unsigned long call_site,
					    const void *ptr,
					    size_t bytes_req,
					    size_t bytes_alloc,
					    gfp_t gfp_flags,
					    int node)
{
	kmemtrace_alloc(KMEMTRACE_TYPE_CACHE, call_site, ptr,
			bytes_req, bytes_alloc, gfp_flags, node);
}

static void kmemtrace_kfree(unsigned long call_site, const void *ptr)
{
	kmemtrace_free(KMEMTRACE_TYPE_KMALLOC, call_site, ptr);
}

static void kmemtrace_kmem_cache_free(unsigned long call_site, const void *ptr)
{
	kmemtrace_free(KMEMTRACE_TYPE_CACHE, call_site, ptr);
}
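
/*
 * Attach the probes above to their tracepoints. Note that on partial
 * failure the probes that were already registered are left in place;
 * they are only torn down when kmemtrace_stop_probes() runs.
 */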
static int kmemtrace_start_probes(void)
{
	int err;

	err = register_trace_kmalloc(kmemtrace_kmalloc);
	if (err)
		return err;
	err = register_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc);
	if (err)
		return err;
	err = register_trace_kmalloc_node(kmemtrace_kmalloc_node);
	if (err)
		return err;
	err = register_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node);
	if (err)
		return err;
	err = register_trace_kfree(kmemtrace_kfree);
	if (err)
		return err;
	err = register_trace_kmem_cache_free(kmemtrace_kmem_cache_free);

	return err;
}

static void kmemtrace_stop_probes(void)
{
	unregister_trace_kmalloc(kmemtrace_kmalloc);
	unregister_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc);
	unregister_trace_kmalloc_node(kmemtrace_kmalloc_node);
	unregister_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node);
	unregister_trace_kfree(kmemtrace_kfree);
	unregister_trace_kmem_cache_free(kmemtrace_kmem_cache_free);
}
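
/*
 * Tracer callbacks: ->init runs when kmemtrace is selected as the
 * current tracer and resets every possible CPU's buffer before
 * attaching the probes; ->reset runs when it is deselected and
 * detaches them.
 */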
static int kmem_trace_init(struct trace_array *tr)
{
	int cpu;

	kmemtrace_array = tr;

	for_each_cpu_mask(cpu, cpu_possible_map)
		tracing_reset(tr, cpu);

	/* Propagate a registration failure instead of dropping it */
	return kmemtrace_start_probes();
}

static void kmem_trace_reset(struct trace_array *tr)
{
	kmemtrace_stop_probes();
}

static void kmemtrace_headers(struct seq_file *s)
{
	/* Don't need headers for the original kmemtrace output */
	if (!(kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL))
		return;

	seq_printf(s, "#\n");
	seq_printf(s, "# ALLOC  TYPE  REQ   GIVEN  FLAGS     "
			"      POINTER  NODE    CALLER\n");
	seq_printf(s, "# FREE   |      |     |       |       "
			"       |   |            |\n");
	seq_printf(s, "# |\n\n");
}

/*
 * The following two functions give the original output from kmemtrace,
 * or something close to it; some details may still be missing.
 */
static enum print_line_t
kmemtrace_print_alloc_original(struct trace_iterator *iter,
			       struct kmemtrace_alloc_entry *entry)
{
	struct trace_seq *s = &iter->seq;
	int ret;

	/* Taken from the old linux/kmemtrace.h */
	ret = trace_seq_printf(s, "type_id %d call_site %lu ptr %lu "
		"bytes_req %lu bytes_alloc %lu gfp_flags %lu node %d\n",
		entry->type_id, entry->call_site, (unsigned long) entry->ptr,
		(unsigned long) entry->bytes_req,
		(unsigned long) entry->bytes_alloc,
		(unsigned long) entry->gfp_flags, entry->node);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
kmemtrace_print_free_original(struct trace_iterator *iter,
			      struct kmemtrace_free_entry *entry)
{
	struct trace_seq *s = &iter->seq;
	int ret;

	/* Taken from the old linux/kmemtrace.h */
	ret = trace_seq_printf(s, "type_id %d call_site %lu ptr %lu\n",
		entry->type_id, entry->call_site, (unsigned long) entry->ptr);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

/* The following two provide a more minimalistic output */
static enum print_line_t
kmemtrace_print_alloc_compress(struct trace_iterator *iter,
			       struct kmemtrace_alloc_entry *entry)
{
	struct trace_seq *s = &iter->seq;
	int ret;

	/* Alloc entry */
	ret = trace_seq_printf(s, " + ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Type */
	switch (entry->type_id) {
	case KMEMTRACE_TYPE_KMALLOC:
		ret = trace_seq_printf(s, "K ");
		break;
	case KMEMTRACE_TYPE_CACHE:
		ret = trace_seq_printf(s, "C ");
		break;
	case KMEMTRACE_TYPE_PAGES:
		ret = trace_seq_printf(s, "P ");
		break;
	default:
		ret = trace_seq_printf(s, "? ");
	}
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Requested */
	ret = trace_seq_printf(s, "%4zu ", entry->bytes_req);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Allocated */
	ret = trace_seq_printf(s, "%4zu ", entry->bytes_alloc);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * Flags
	 * TODO: it would be better to print the GFP flags by name
	 */
	ret = trace_seq_printf(s, "%08x ", entry->gfp_flags);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Pointer to allocated */
	ret = trace_seq_printf(s, "0x%tx ", (ptrdiff_t)entry->ptr);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Node */
	ret = trace_seq_printf(s, "%4d ", entry->node);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Call site */
	ret = seq_print_ip_sym(s, entry->call_site, 0);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	if (!trace_seq_printf(s, "\n"))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
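
/*
 * The free-side variant below pads the REQ/GIVEN/FLAGS and NODE
 * columns with spaces, so its lines stay aligned with the allocation
 * lines above them.
 */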
static enum print_line_t
kmemtrace_print_free_compress(struct trace_iterator *iter,
			      struct kmemtrace_free_entry *entry)
{
	struct trace_seq *s = &iter->seq;
	int ret;

	/* Free entry */
	ret = trace_seq_printf(s, " - ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Type */
	switch (entry->type_id) {
	case KMEMTRACE_TYPE_KMALLOC:
		ret = trace_seq_printf(s, "K ");
		break;
	case KMEMTRACE_TYPE_CACHE:
		ret = trace_seq_printf(s, "C ");
		break;
	case KMEMTRACE_TYPE_PAGES:
		ret = trace_seq_printf(s, "P ");
		break;
	default:
		ret = trace_seq_printf(s, "? ");
	}
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Skip requested/allocated/flags */
	ret = trace_seq_printf(s, "                   ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Pointer to allocated */
	ret = trace_seq_printf(s, "0x%tx ", (ptrdiff_t)entry->ptr);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Skip node */
	ret = trace_seq_printf(s, "     ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Call site */
	ret = seq_print_ip_sym(s, entry->call_site, 0);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	if (!trace_seq_printf(s, "\n"))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
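
/*
 * Dispatch each ring-buffer entry to the right printer, honouring the
 * kmem_minimalistic option; entry types we don't own are left to the
 * generic output code via TRACE_TYPE_UNHANDLED.
 */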
static enum print_line_t kmemtrace_print_line(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;

	switch (entry->type) {
	case TRACE_KMEM_ALLOC: {
		struct kmemtrace_alloc_entry *field;

		trace_assign_type(field, entry);
		if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)
			return kmemtrace_print_alloc_compress(iter, field);
		else
			return kmemtrace_print_alloc_original(iter, field);
	}

	case TRACE_KMEM_FREE: {
		struct kmemtrace_free_entry *field;

		trace_assign_type(field, entry);
		if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)
			return kmemtrace_print_free_compress(iter, field);
		else
			return kmemtrace_print_free_original(iter, field);
	}

	default:
		return TRACE_TYPE_UNHANDLED;
	}
}
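
/* Hook the pieces above into the ftrace core */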
static struct tracer kmem_tracer __read_mostly = {
	.name		= "kmemtrace",
	.init		= kmem_trace_init,
	.reset		= kmem_trace_reset,
	.print_line	= kmemtrace_print_line,
	.print_header	= kmemtrace_headers,
	.flags		= &kmem_tracer_flags
};

void kmemtrace_init(void)
{
	/* earliest opportunity to start kmem tracing */
}

static int __init init_kmem_tracer(void)
{
	return register_tracer(&kmem_tracer);
}

device_initcall(init_kmem_tracer);
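
/*
 * Usage sketch (assumes debugfs is mounted at /sys/kernel/debug):
 *
 *   echo kmemtrace > /sys/kernel/debug/tracing/current_tracer
 *   echo kmem_minimalistic > /sys/kernel/debug/tracing/trace_options
 *   cat /sys/kernel/debug/tracing/trace
 *
 * Writing "nokmem_minimalistic" to trace_options switches back to the
 * original kmemtrace output format.
 */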