kmemtrace.c

/*
 * Memory allocator tracing
 *
 * Copyright (C) 2008 Eduard - Gabriel Munteanu
 * Copyright (C) 2008 Pekka Enberg <penberg@cs.helsinki.fi>
 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/tracepoint.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/dcache.h>
#include <linux/fs.h>

#include <trace/kmemtrace.h>

#include "trace_output.h"
#include "trace.h"
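
/*
 * Typical usage, a sketch assuming CONFIG_KMEMTRACE is enabled and debugfs
 * is mounted at /sys/kernel/debug:
 *
 *	# echo kmemtrace > /sys/kernel/debug/tracing/current_tracer
 *	# cat /sys/kernel/debug/tracing/trace > kmemtrace.dat
 *
 * The default output is the binary record stream defined further down;
 * "echo kmem_minimalistic > trace_options" switches to the one-line
 * human-readable format instead.
 */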

/* Select an alternative, minimalistic output format, rather than the original one */
#define TRACE_KMEM_OPT_MINIMAL	0x1

static struct tracer_opt kmem_opts[] = {
	/* The minimalistic output is disabled by default */
	{ TRACER_OPT(kmem_minimalistic, TRACE_KMEM_OPT_MINIMAL) },
	{ }
};

static struct tracer_flags kmem_tracer_flags = {
	.val	= 0,
	.opts	= kmem_opts
};

static struct trace_array *kmemtrace_array;
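
/*
 * Both helpers below follow the standard ring-buffer pattern: reserve an
 * event slot, fill in the type-specific payload, commit, then wake up any
 * blocked readers.  If reservation fails (e.g. the buffer is full), the
 * event is silently dropped.
 */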

/* Trace allocations */
static inline void kmemtrace_alloc(enum kmemtrace_type_id type_id,
				   unsigned long call_site,
				   const void *ptr,
				   size_t bytes_req,
				   size_t bytes_alloc,
				   gfp_t gfp_flags,
				   int node)
{
	struct trace_array *tr = kmemtrace_array;
	struct kmemtrace_alloc_entry *entry;
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, 0, 0);

	entry->ent.type		= TRACE_KMEM_ALLOC;
	entry->type_id		= type_id;
	entry->call_site	= call_site;
	entry->ptr		= ptr;
	entry->bytes_req	= bytes_req;
	entry->bytes_alloc	= bytes_alloc;
	entry->gfp_flags	= gfp_flags;
	entry->node		= node;

	ring_buffer_unlock_commit(tr->buffer, event);

	trace_wake_up();
}

static inline void kmemtrace_free(enum kmemtrace_type_id type_id,
				  unsigned long call_site,
				  const void *ptr)
{
	struct trace_array *tr = kmemtrace_array;
	struct kmemtrace_free_entry *entry;
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, 0, 0);

	entry->ent.type		= TRACE_KMEM_FREE;
	entry->type_id		= type_id;
	entry->call_site	= call_site;
	entry->ptr		= ptr;

	ring_buffer_unlock_commit(tr->buffer, event);

	trace_wake_up();
}

static void kmemtrace_kmalloc(unsigned long call_site,
			      const void *ptr,
			      size_t bytes_req,
			      size_t bytes_alloc,
			      gfp_t gfp_flags)
{
	kmemtrace_alloc(KMEMTRACE_TYPE_KMALLOC, call_site, ptr,
			bytes_req, bytes_alloc, gfp_flags, -1);
}

static void kmemtrace_kmem_cache_alloc(unsigned long call_site,
				       const void *ptr,
				       size_t bytes_req,
				       size_t bytes_alloc,
				       gfp_t gfp_flags)
{
	kmemtrace_alloc(KMEMTRACE_TYPE_CACHE, call_site, ptr,
			bytes_req, bytes_alloc, gfp_flags, -1);
}

static void kmemtrace_kmalloc_node(unsigned long call_site,
				   const void *ptr,
				   size_t bytes_req,
				   size_t bytes_alloc,
				   gfp_t gfp_flags,
				   int node)
{
	kmemtrace_alloc(KMEMTRACE_TYPE_KMALLOC, call_site, ptr,
			bytes_req, bytes_alloc, gfp_flags, node);
}

static void kmemtrace_kmem_cache_alloc_node(unsigned long call_site,
					    const void *ptr,
					    size_t bytes_req,
					    size_t bytes_alloc,
					    gfp_t gfp_flags,
					    int node)
{
	kmemtrace_alloc(KMEMTRACE_TYPE_CACHE, call_site, ptr,
			bytes_req, bytes_alloc, gfp_flags, node);
}

static void kmemtrace_kfree(unsigned long call_site, const void *ptr)
{
	kmemtrace_free(KMEMTRACE_TYPE_KMALLOC, call_site, ptr);
}

static void kmemtrace_kmem_cache_free(unsigned long call_site, const void *ptr)
{
	kmemtrace_free(KMEMTRACE_TYPE_CACHE, call_site, ptr);
}
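
/*
 * The wrappers above adapt each allocator tracepoint to the two generic
 * helpers; the non-NUMA variants pass node == -1 to mean "unspecified".
 */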

static int kmemtrace_start_probes(void)
{
	int err;

	err = register_trace_kmalloc(kmemtrace_kmalloc);
	if (err)
		return err;
	err = register_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc);
	if (err)
		return err;
	err = register_trace_kmalloc_node(kmemtrace_kmalloc_node);
	if (err)
		return err;
	err = register_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node);
	if (err)
		return err;
	err = register_trace_kfree(kmemtrace_kfree);
	if (err)
		return err;
	err = register_trace_kmem_cache_free(kmemtrace_kmem_cache_free);

	return err;
}

static void kmemtrace_stop_probes(void)
{
	unregister_trace_kmalloc(kmemtrace_kmalloc);
	unregister_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc);
	unregister_trace_kmalloc_node(kmemtrace_kmalloc_node);
	unregister_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node);
	unregister_trace_kfree(kmemtrace_kfree);
	unregister_trace_kmem_cache_free(kmemtrace_kmem_cache_free);
}
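
/*
 * Note that kmemtrace_start_probes() does not unwind on partial failure: a
 * failed registration leaves any earlier probes in place until the tracer
 * is reset, at which point kmemtrace_stop_probes() unregisters them all.
 */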

static int kmem_trace_init(struct trace_array *tr)
{
	int cpu;

	kmemtrace_array = tr;

	for_each_cpu_mask(cpu, cpu_possible_map)
		tracing_reset(tr, cpu);

	kmemtrace_start_probes();

	return 0;
}

static void kmem_trace_reset(struct trace_array *tr)
{
	kmemtrace_stop_probes();
}

static void kmemtrace_headers(struct seq_file *s)
{
	/* Don't need headers for the original kmemtrace output */
	if (!(kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL))
		return;

	seq_printf(s, "#\n");
	seq_printf(s, "# ALLOC  TYPE  REQ   GIVEN  FLAGS     "
			"      POINTER         NODE    CALLER\n");
	seq_printf(s, "# FREE   |      |     |       |       "
			"       |       |       |       |\n");
	seq_printf(s, "# |\n\n");
}

/*
 * The following functions give the original output from kmemtrace,
 * plus the origin CPU, since reordering occurs in-kernel now.
 */

#define KMEMTRACE_USER_ALLOC	0
#define KMEMTRACE_USER_FREE	1

struct kmemtrace_user_event {
	u8		event_id;
	u8		type_id;
	u16		event_size;
	u32		cpu;
	u64		timestamp;
	unsigned long	call_site;
	unsigned long	ptr;
};

struct kmemtrace_user_event_alloc {
	size_t		bytes_req;
	size_t		bytes_alloc;
	unsigned	gfp_flags;
	int		node;
};
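
/*
 * Wire format of the binary ("user") output: every record starts with a
 * struct kmemtrace_user_event; allocation records are followed immediately
 * by a struct kmemtrace_user_event_alloc.  event_size covers the whole
 * record, so a reader can skip event types it does not understand.  A
 * minimal userspace reader might look like this (a sketch only; handle()
 * and skip() are hypothetical helpers, not part of any shipped tool):
 *
 *	struct kmemtrace_user_event ev;
 *
 *	while (fread(&ev, sizeof(ev), 1, fp) == 1) {
 *		handle(&ev);
 *		skip(fp, ev.event_size - sizeof(ev));
 *	}
 */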

static enum print_line_t
kmemtrace_print_alloc_user(struct trace_iterator *iter,
			   struct kmemtrace_alloc_entry *entry)
{
	struct kmemtrace_user_event_alloc *ev_alloc;
	struct trace_seq *s = &iter->seq;
	struct kmemtrace_user_event *ev;

	ev = trace_seq_reserve(s, sizeof(*ev));
	if (!ev)
		return TRACE_TYPE_PARTIAL_LINE;

	ev->event_id	= KMEMTRACE_USER_ALLOC;
	ev->type_id	= entry->type_id;
	ev->event_size	= sizeof(*ev) + sizeof(*ev_alloc);
	ev->cpu		= iter->cpu;
	ev->timestamp	= iter->ts;
	ev->call_site	= entry->call_site;
	ev->ptr		= (unsigned long)entry->ptr;

	ev_alloc = trace_seq_reserve(s, sizeof(*ev_alloc));
	if (!ev_alloc)
		return TRACE_TYPE_PARTIAL_LINE;

	ev_alloc->bytes_req	= entry->bytes_req;
	ev_alloc->bytes_alloc	= entry->bytes_alloc;
	ev_alloc->gfp_flags	= entry->gfp_flags;
	ev_alloc->node		= entry->node;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
kmemtrace_print_free_user(struct trace_iterator *iter,
			  struct kmemtrace_free_entry *entry)
{
	struct trace_seq *s = &iter->seq;
	struct kmemtrace_user_event *ev;

	ev = trace_seq_reserve(s, sizeof(*ev));
	if (!ev)
		return TRACE_TYPE_PARTIAL_LINE;

	ev->event_id	= KMEMTRACE_USER_FREE;
	ev->type_id	= entry->type_id;
	ev->event_size	= sizeof(*ev);
	ev->cpu		= iter->cpu;
	ev->timestamp	= iter->ts;
	ev->call_site	= entry->call_site;
	ev->ptr		= (unsigned long)entry->ptr;

	return TRACE_TYPE_HANDLED;
}

/* The following two functions provide the more minimalistic output */

static enum print_line_t
kmemtrace_print_alloc_compress(struct trace_iterator *iter,
			       struct kmemtrace_alloc_entry *entry)
{
	struct trace_seq *s = &iter->seq;
	int ret;

	/* Alloc entry */
	ret = trace_seq_printf(s, "  +      ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Type */
	switch (entry->type_id) {
	case KMEMTRACE_TYPE_KMALLOC:
		ret = trace_seq_printf(s, "K   ");
		break;
	case KMEMTRACE_TYPE_CACHE:
		ret = trace_seq_printf(s, "C   ");
		break;
	case KMEMTRACE_TYPE_PAGES:
		ret = trace_seq_printf(s, "P   ");
		break;
	default:
		ret = trace_seq_printf(s, "?   ");
	}

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Requested */
	ret = trace_seq_printf(s, "%4zu   ", entry->bytes_req);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Allocated */
	ret = trace_seq_printf(s, "%4zu   ", entry->bytes_alloc);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * Flags
	 * TODO: it would be better to print the names of the GFP flags
	 */
	ret = trace_seq_printf(s, "%08x   ", entry->gfp_flags);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Pointer to allocated */
	ret = trace_seq_printf(s, "0x%tx   ", (ptrdiff_t)entry->ptr);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Node */
	ret = trace_seq_printf(s, "%4d   ", entry->node);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Call site */
	ret = seq_print_ip_sym(s, entry->call_site, 0);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	if (!trace_seq_printf(s, "\n"))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
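
/*
 * With kmem_minimalistic set, an allocation event renders as one line of
 * the form below (illustrative values, not captured output):
 *
 *	+      K      8     32   000080d0   0xffff88003c4b2e40     -1   some_caller+0x1a/0x90
 */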

static enum print_line_t
kmemtrace_print_free_compress(struct trace_iterator *iter,
			      struct kmemtrace_free_entry *entry)
{
	struct trace_seq *s = &iter->seq;
	int ret;

	/* Free entry */
	ret = trace_seq_printf(s, "  -      ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Type */
	switch (entry->type_id) {
	case KMEMTRACE_TYPE_KMALLOC:
		ret = trace_seq_printf(s, "K   ");
		break;
	case KMEMTRACE_TYPE_CACHE:
		ret = trace_seq_printf(s, "C   ");
		break;
	case KMEMTRACE_TYPE_PAGES:
		ret = trace_seq_printf(s, "P   ");
		break;
	default:
		ret = trace_seq_printf(s, "?   ");
	}

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Skip requested/allocated/flags */
	ret = trace_seq_printf(s, "                       ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Pointer to allocated */
	ret = trace_seq_printf(s, "0x%tx   ", (ptrdiff_t)entry->ptr);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Skip node */
	ret = trace_seq_printf(s, "       ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Call site */
	ret = seq_print_ip_sym(s, entry->call_site, 0);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	if (!trace_seq_printf(s, "\n"))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t kmemtrace_print_line(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;

	switch (entry->type) {
	case TRACE_KMEM_ALLOC: {
		struct kmemtrace_alloc_entry *field;

		trace_assign_type(field, entry);
		if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)
			return kmemtrace_print_alloc_compress(iter, field);
		else
			return kmemtrace_print_alloc_user(iter, field);
	}

	case TRACE_KMEM_FREE: {
		struct kmemtrace_free_entry *field;

		trace_assign_type(field, entry);
		if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)
			return kmemtrace_print_free_compress(iter, field);
		else
			return kmemtrace_print_free_user(iter, field);
	}

	default:
		return TRACE_TYPE_UNHANDLED;
	}
}

static struct tracer kmem_tracer __read_mostly = {
	.name		= "kmemtrace",
	.init		= kmem_trace_init,
	.reset		= kmem_trace_reset,
	.print_line	= kmemtrace_print_line,
	.print_header	= kmemtrace_headers,
	.flags		= &kmem_tracer_flags
};

void kmemtrace_init(void)
{
	/* earliest opportunity to start kmem tracing */
}

static int __init init_kmem_tracer(void)
{
	return register_tracer(&kmem_tracer);
}

device_initcall(init_kmem_tracer);