kmemtrace.c

/*
 * Memory allocator tracing
 *
 * Copyright (C) 2008 Eduard - Gabriel Munteanu
 * Copyright (C) 2008 Pekka Enberg <penberg@cs.helsinki.fi>
 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/tracepoint.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/dcache.h>
#include <linux/fs.h>

#include <linux/kmemtrace.h>

#include "trace_output.h"
#include "trace.h"

/* Select an alternative, minimalistic output instead of the original one */
#define TRACE_KMEM_OPT_MINIMAL  0x1

static struct tracer_opt kmem_opts[] = {
        /* The minimalistic output is disabled by default */
        { TRACER_OPT(kmem_minimalistic, TRACE_KMEM_OPT_MINIMAL) },
        { }
};

static struct tracer_flags kmem_tracer_flags = {
        .val    = 0,
        .opts   = kmem_opts
};
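
/*
 * Note: tracer_opt entries are exposed through the generic ftrace options
 * interface; toggling the "kmem_minimalistic" option flips the
 * TRACE_KMEM_OPT_MINIMAL bit in kmem_tracer_flags.val, which the print
 * callbacks below test to choose between the binary stream and the compact
 * human-readable output.
 */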
static struct trace_array *kmemtrace_array;

/* Trace allocations */
static inline void kmemtrace_alloc(enum kmemtrace_type_id type_id,
                                   unsigned long call_site,
                                   const void *ptr,
                                   size_t bytes_req,
                                   size_t bytes_alloc,
                                   gfp_t gfp_flags,
                                   int node)
{
        struct ftrace_event_call *call = &event_kmem_alloc;
        struct trace_array *tr = kmemtrace_array;
        struct kmemtrace_alloc_entry *entry;
        struct ring_buffer_event *event;

        event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
        if (!event)
                return;

        entry = ring_buffer_event_data(event);
        tracing_generic_entry_update(&entry->ent, 0, 0);

        entry->ent.type     = TRACE_KMEM_ALLOC;
        entry->type_id      = type_id;
        entry->call_site    = call_site;
        entry->ptr          = ptr;
        entry->bytes_req    = bytes_req;
        entry->bytes_alloc  = bytes_alloc;
        entry->gfp_flags    = gfp_flags;
        entry->node         = node;

        if (!filter_check_discard(call, entry, tr->buffer, event))
                ring_buffer_unlock_commit(tr->buffer, event);

        trace_wake_up();
}
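
/*
 * kmemtrace_alloc() above and kmemtrace_free() below follow the standard
 * ring-buffer pattern: reserve space for the entry, fill it in, then either
 * commit it or, if filter_check_discard() finds an event filter that rejects
 * it, drop it.  trace_wake_up() kicks any readers blocked on the trace pipe.
 */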
static inline void kmemtrace_free(enum kmemtrace_type_id type_id,
                                  unsigned long call_site,
                                  const void *ptr)
{
        struct ftrace_event_call *call = &event_kmem_free;
        struct trace_array *tr = kmemtrace_array;
        struct kmemtrace_free_entry *entry;
        struct ring_buffer_event *event;

        event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
        if (!event)
                return;

        entry = ring_buffer_event_data(event);
        tracing_generic_entry_update(&entry->ent, 0, 0);

        entry->ent.type   = TRACE_KMEM_FREE;
        entry->type_id    = type_id;
        entry->call_site  = call_site;
        entry->ptr        = ptr;

        if (!filter_check_discard(call, entry, tr->buffer, event))
                ring_buffer_unlock_commit(tr->buffer, event);

        trace_wake_up();
}

static void kmemtrace_kmalloc(unsigned long call_site,
                              const void *ptr,
                              size_t bytes_req,
                              size_t bytes_alloc,
                              gfp_t gfp_flags)
{
        kmemtrace_alloc(KMEMTRACE_TYPE_KMALLOC, call_site, ptr,
                        bytes_req, bytes_alloc, gfp_flags, -1);
}

static void kmemtrace_kmem_cache_alloc(unsigned long call_site,
                                       const void *ptr,
                                       size_t bytes_req,
                                       size_t bytes_alloc,
                                       gfp_t gfp_flags)
{
        kmemtrace_alloc(KMEMTRACE_TYPE_CACHE, call_site, ptr,
                        bytes_req, bytes_alloc, gfp_flags, -1);
}

static void kmemtrace_kmalloc_node(unsigned long call_site,
                                   const void *ptr,
                                   size_t bytes_req,
                                   size_t bytes_alloc,
                                   gfp_t gfp_flags,
                                   int node)
{
        kmemtrace_alloc(KMEMTRACE_TYPE_KMALLOC, call_site, ptr,
                        bytes_req, bytes_alloc, gfp_flags, node);
}

static void kmemtrace_kmem_cache_alloc_node(unsigned long call_site,
                                            const void *ptr,
                                            size_t bytes_req,
                                            size_t bytes_alloc,
                                            gfp_t gfp_flags,
                                            int node)
{
        kmemtrace_alloc(KMEMTRACE_TYPE_CACHE, call_site, ptr,
                        bytes_req, bytes_alloc, gfp_flags, node);
}

static void kmemtrace_kfree(unsigned long call_site, const void *ptr)
{
        kmemtrace_free(KMEMTRACE_TYPE_KMALLOC, call_site, ptr);
}

static void kmemtrace_kmem_cache_free(unsigned long call_site, const void *ptr)
{
        kmemtrace_free(KMEMTRACE_TYPE_CACHE, call_site, ptr);
}
static int kmemtrace_start_probes(void)
{
        int err;

        err = register_trace_kmalloc(kmemtrace_kmalloc);
        if (err)
                return err;
        err = register_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc);
        if (err)
                return err;
        err = register_trace_kmalloc_node(kmemtrace_kmalloc_node);
        if (err)
                return err;
        err = register_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node);
        if (err)
                return err;
        err = register_trace_kfree(kmemtrace_kfree);
        if (err)
                return err;

        err = register_trace_kmem_cache_free(kmemtrace_kmem_cache_free);

        return err;
}
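
/*
 * The register_trace_*() calls above attach our probes to the allocator
 * tracepoints (kmalloc, kmem_cache_alloc, their _node variants, kfree and
 * kmem_cache_free).  If one registration fails, the probes registered
 * before it are left in place until kmemtrace_stop_probes() runs.
 */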
static void kmemtrace_stop_probes(void)
{
        unregister_trace_kmalloc(kmemtrace_kmalloc);
        unregister_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc);
        unregister_trace_kmalloc_node(kmemtrace_kmalloc_node);
        unregister_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node);
        unregister_trace_kfree(kmemtrace_kfree);
        unregister_trace_kmem_cache_free(kmemtrace_kmem_cache_free);
}

static int kmem_trace_init(struct trace_array *tr)
{
        int cpu;

        kmemtrace_array = tr;

        for_each_cpu(cpu, cpu_possible_mask)
                tracing_reset(tr, cpu);

        kmemtrace_start_probes();

        return 0;
}
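
/*
 * The per-cpu buffers are reset for every possible CPU before the probes
 * are attached, so events left over from a previously selected tracer do
 * not leak into the new trace.
 */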
static void kmem_trace_reset(struct trace_array *tr)
{
        kmemtrace_stop_probes();
}

static void kmemtrace_headers(struct seq_file *s)
{
        /* Don't need headers for the original kmemtrace output */
        if (!(kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL))
                return;

        seq_printf(s, "#\n");
        seq_printf(s, "# ALLOC  TYPE  REQ   GIVEN  FLAGS     "
                        "      POINTER         NODE    CALLER\n");
        seq_printf(s, "# FREE   |      |     |       |       "
                        "       |   |            |        |\n");
        seq_printf(s, "# |\n\n");
}
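
/*
 * With the kmem_minimalistic option set, each event becomes one
 * human-readable line under the header above, roughly of the form
 * (hypothetical values):
 *
 *   +   K     32     32   000080d0  0xffff88003c4a2000    -1  __alloc_skb+0x4a/0x160
 *   -   K                           0xffff88003c4a2000        kfree+0x7f/0x120
 */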
/*
 * The following functions give the original output from kmemtrace,
 * plus the origin CPU, since reordering occurs in-kernel now.
 */

#define KMEMTRACE_USER_ALLOC    0
#define KMEMTRACE_USER_FREE     1

struct kmemtrace_user_event {
        u8              event_id;
        u8              type_id;
        u16             event_size;
        u32             cpu;
        u64             timestamp;
        unsigned long   call_site;
        unsigned long   ptr;
};

struct kmemtrace_user_event_alloc {
        size_t          bytes_req;
        size_t          bytes_alloc;
        unsigned        gfp_flags;
        int             node;
};
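
/*
 * Layout of the binary stream handed to userspace: every record starts with
 * a struct kmemtrace_user_event; ALLOC records are immediately followed by
 * a struct kmemtrace_user_event_alloc.  event_size holds the total size of
 * the record, which lets a reader skip over records it does not understand.
 * This is the format the userspace kmemtrace analysis tools consume.
 */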
static enum print_line_t
kmemtrace_print_alloc_user(struct trace_iterator *iter,
                           struct kmemtrace_alloc_entry *entry)
{
        struct kmemtrace_user_event_alloc *ev_alloc;
        struct trace_seq *s = &iter->seq;
        struct kmemtrace_user_event *ev;

        ev = trace_seq_reserve(s, sizeof(*ev));
        if (!ev)
                return TRACE_TYPE_PARTIAL_LINE;

        ev->event_id    = KMEMTRACE_USER_ALLOC;
        ev->type_id     = entry->type_id;
        ev->event_size  = sizeof(*ev) + sizeof(*ev_alloc);
        ev->cpu         = iter->cpu;
        ev->timestamp   = iter->ts;
        ev->call_site   = entry->call_site;
        ev->ptr         = (unsigned long)entry->ptr;

        ev_alloc = trace_seq_reserve(s, sizeof(*ev_alloc));
        if (!ev_alloc)
                return TRACE_TYPE_PARTIAL_LINE;

        ev_alloc->bytes_req     = entry->bytes_req;
        ev_alloc->bytes_alloc   = entry->bytes_alloc;
        ev_alloc->gfp_flags     = entry->gfp_flags;
        ev_alloc->node          = entry->node;

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t
kmemtrace_print_free_user(struct trace_iterator *iter,
                          struct kmemtrace_free_entry *entry)
{
        struct trace_seq *s = &iter->seq;
        struct kmemtrace_user_event *ev;

        ev = trace_seq_reserve(s, sizeof(*ev));
        if (!ev)
                return TRACE_TYPE_PARTIAL_LINE;

        ev->event_id    = KMEMTRACE_USER_FREE;
        ev->type_id     = entry->type_id;
        ev->event_size  = sizeof(*ev);
        ev->cpu         = iter->cpu;
        ev->timestamp   = iter->ts;
        ev->call_site   = entry->call_site;
        ev->ptr         = (unsigned long)entry->ptr;

        return TRACE_TYPE_HANDLED;
}
/* The following two functions provide a more minimalistic output */
static enum print_line_t
kmemtrace_print_alloc_compress(struct trace_iterator *iter,
                               struct kmemtrace_alloc_entry *entry)
{
        struct trace_seq *s = &iter->seq;
        int ret;

        /* Alloc entry */
        ret = trace_seq_printf(s, "  +      ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Type */
        switch (entry->type_id) {
        case KMEMTRACE_TYPE_KMALLOC:
                ret = trace_seq_printf(s, "K   ");
                break;
        case KMEMTRACE_TYPE_CACHE:
                ret = trace_seq_printf(s, "C   ");
                break;
        case KMEMTRACE_TYPE_PAGES:
                ret = trace_seq_printf(s, "P   ");
                break;
        default:
                ret = trace_seq_printf(s, "?   ");
        }

        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Requested */
        ret = trace_seq_printf(s, "%4zu   ", entry->bytes_req);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Allocated */
        ret = trace_seq_printf(s, "%4zu   ", entry->bytes_alloc);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /*
         * Flags
         * TODO: it would be better to print the names of the GFP flags
         */
        ret = trace_seq_printf(s, "%08x   ", entry->gfp_flags);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Pointer to allocated */
        ret = trace_seq_printf(s, "0x%tx   ", (ptrdiff_t)entry->ptr);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Node */
        ret = trace_seq_printf(s, "%4d   ", entry->node);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Call site */
        ret = seq_print_ip_sym(s, entry->call_site, 0);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        if (!trace_seq_printf(s, "\n"))
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}
static enum print_line_t
kmemtrace_print_free_compress(struct trace_iterator *iter,
                              struct kmemtrace_free_entry *entry)
{
        struct trace_seq *s = &iter->seq;
        int ret;

        /* Free entry */
        ret = trace_seq_printf(s, "  -      ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Type */
        switch (entry->type_id) {
        case KMEMTRACE_TYPE_KMALLOC:
                ret = trace_seq_printf(s, "K     ");
                break;
        case KMEMTRACE_TYPE_CACHE:
                ret = trace_seq_printf(s, "C     ");
                break;
        case KMEMTRACE_TYPE_PAGES:
                ret = trace_seq_printf(s, "P     ");
                break;
        default:
                ret = trace_seq_printf(s, "?     ");
        }

        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Skip requested/allocated/flags */
        ret = trace_seq_printf(s, "                       ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Pointer to allocated */
        ret = trace_seq_printf(s, "0x%tx   ", (ptrdiff_t)entry->ptr);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Skip node */
        ret = trace_seq_printf(s, "         ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Call site */
        ret = seq_print_ip_sym(s, entry->call_site, 0);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        if (!trace_seq_printf(s, "\n"))
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}
static enum print_line_t kmemtrace_print_line(struct trace_iterator *iter)
{
        struct trace_entry *entry = iter->ent;

        switch (entry->type) {
        case TRACE_KMEM_ALLOC: {
                struct kmemtrace_alloc_entry *field;

                trace_assign_type(field, entry);
                if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)
                        return kmemtrace_print_alloc_compress(iter, field);
                else
                        return kmemtrace_print_alloc_user(iter, field);
        }

        case TRACE_KMEM_FREE: {
                struct kmemtrace_free_entry *field;

                trace_assign_type(field, entry);
                if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)
                        return kmemtrace_print_free_compress(iter, field);
                else
                        return kmemtrace_print_free_user(iter, field);
        }

        default:
                return TRACE_TYPE_UNHANDLED;
        }
}
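
/*
 * kmemtrace_print_line() is the tracer's dispatch point: for each entry
 * pulled from the ring buffer it selects the compact or the binary
 * formatter based on the kmem_minimalistic option.  Returning
 * TRACE_TYPE_UNHANDLED hands entries of other types back to the generic
 * ftrace output code.
 */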
static struct tracer kmem_tracer __read_mostly = {
        .name           = "kmemtrace",
        .init           = kmem_trace_init,
        .reset          = kmem_trace_reset,
        .print_line     = kmemtrace_print_line,
        .print_header   = kmemtrace_headers,
        .flags          = &kmem_tracer_flags
};

void kmemtrace_init(void)
{
        /* earliest opportunity to start kmem tracing */
}

static int __init init_kmem_tracer(void)
{
        return register_tracer(&kmem_tracer);
}

device_initcall(init_kmem_tracer);
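
/*
 * Usage sketch (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *   # echo kmemtrace > /sys/kernel/debug/tracing/current_tracer
 *   # echo kmem_minimalistic > /sys/kernel/debug/tracing/trace_options
 *   # cat /sys/kernel/debug/tracing/trace
 *
 * The first line selects this tracer, the second enables the compact
 * human-readable output (omit it to get the binary stream for the
 * userspace kmemtrace tools), and the third reads the captured events.
 */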