kmemtrace.c

/*
 * Memory allocator tracing
 *
 * Copyright (C) 2008 Eduard - Gabriel Munteanu
 * Copyright (C) 2008 Pekka Enberg <penberg@cs.helsinki.fi>
 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/tracepoint.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/dcache.h>
#include <linux/fs.h>

#include <linux/kmemtrace.h>

#include "trace_output.h"
#include "trace.h"
/* Select an alternative, minimalistic output instead of the original one */
#define TRACE_KMEM_OPT_MINIMAL	0x1

static struct tracer_opt kmem_opts[] = {
	/* Default: disable the minimalistic output */
	{ TRACER_OPT(kmem_minimalistic, TRACE_KMEM_OPT_MINIMAL) },
	{ }
};

static struct tracer_flags kmem_tracer_flags = {
	.val	= 0,
	.opts	= kmem_opts
};

static struct trace_array *kmemtrace_array;
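
/*
 * Both recording helpers below follow the same pattern: reserve an
 * entry in the ftrace ring buffer, fill it in, and commit it unless
 * the event filter discards it; waiting readers are then woken up.
 */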
/* Trace allocations */
static inline void kmemtrace_alloc(enum kmemtrace_type_id type_id,
				   unsigned long call_site,
				   const void *ptr,
				   size_t bytes_req,
				   size_t bytes_alloc,
				   gfp_t gfp_flags,
				   int node)
{
	struct ftrace_event_call *call = &event_kmem_alloc;
	struct trace_array *tr = kmemtrace_array;
	struct kmemtrace_alloc_entry *entry;
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, 0, 0);

	entry->ent.type		= TRACE_KMEM_ALLOC;
	entry->type_id		= type_id;
	entry->call_site	= call_site;
	entry->ptr		= ptr;
	entry->bytes_req	= bytes_req;
	entry->bytes_alloc	= bytes_alloc;
	entry->gfp_flags	= gfp_flags;
	entry->node		= node;

	if (!filter_check_discard(call, entry, tr->buffer, event))
		ring_buffer_unlock_commit(tr->buffer, event);

	trace_wake_up();
}
static inline void kmemtrace_free(enum kmemtrace_type_id type_id,
				  unsigned long call_site,
				  const void *ptr)
{
	struct ftrace_event_call *call = &event_kmem_free;
	struct trace_array *tr = kmemtrace_array;
	struct kmemtrace_free_entry *entry;
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, 0, 0);

	entry->ent.type		= TRACE_KMEM_FREE;
	entry->type_id		= type_id;
	entry->call_site	= call_site;
	entry->ptr		= ptr;

	if (!filter_check_discard(call, entry, tr->buffer, event))
		ring_buffer_unlock_commit(tr->buffer, event);

	trace_wake_up();
}
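
/*
 * Tracepoint probes: adapt the slab allocator tracepoints (kmalloc,
 * kmem_cache_alloc, their _node variants, kfree and kmem_cache_free)
 * to the recording helpers above, tagging each event with the
 * matching KMEMTRACE_TYPE_* id. The non-NUMA variants pass node -1,
 * i.e. no specific node was requested.
 */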
static void kmemtrace_kmalloc(void *ignore,
			      unsigned long call_site,
			      const void *ptr,
			      size_t bytes_req,
			      size_t bytes_alloc,
			      gfp_t gfp_flags)
{
	kmemtrace_alloc(KMEMTRACE_TYPE_KMALLOC, call_site, ptr,
			bytes_req, bytes_alloc, gfp_flags, -1);
}

static void kmemtrace_kmem_cache_alloc(void *ignore,
				       unsigned long call_site,
				       const void *ptr,
				       size_t bytes_req,
				       size_t bytes_alloc,
				       gfp_t gfp_flags)
{
	kmemtrace_alloc(KMEMTRACE_TYPE_CACHE, call_site, ptr,
			bytes_req, bytes_alloc, gfp_flags, -1);
}

static void kmemtrace_kmalloc_node(void *ignore,
				   unsigned long call_site,
				   const void *ptr,
				   size_t bytes_req,
				   size_t bytes_alloc,
				   gfp_t gfp_flags,
				   int node)
{
	kmemtrace_alloc(KMEMTRACE_TYPE_KMALLOC, call_site, ptr,
			bytes_req, bytes_alloc, gfp_flags, node);
}

static void kmemtrace_kmem_cache_alloc_node(void *ignore,
					    unsigned long call_site,
					    const void *ptr,
					    size_t bytes_req,
					    size_t bytes_alloc,
					    gfp_t gfp_flags,
					    int node)
{
	kmemtrace_alloc(KMEMTRACE_TYPE_CACHE, call_site, ptr,
			bytes_req, bytes_alloc, gfp_flags, node);
}

static void
kmemtrace_kfree(void *ignore, unsigned long call_site, const void *ptr)
{
	kmemtrace_free(KMEMTRACE_TYPE_KMALLOC, call_site, ptr);
}

static void kmemtrace_kmem_cache_free(void *ignore,
				      unsigned long call_site, const void *ptr)
{
	kmemtrace_free(KMEMTRACE_TYPE_CACHE, call_site, ptr);
}
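
/*
 * Note: if one of the later registrations fails, the earlier probes
 * are left registered and only the first error code is returned;
 * kmem_trace_init() below ignores the return value either way.
 */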
static int kmemtrace_start_probes(void)
{
	int err;

	err = register_trace_kmalloc(kmemtrace_kmalloc, NULL);
	if (err)
		return err;
	err = register_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc, NULL);
	if (err)
		return err;
	err = register_trace_kmalloc_node(kmemtrace_kmalloc_node, NULL);
	if (err)
		return err;
	err = register_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node, NULL);
	if (err)
		return err;
	err = register_trace_kfree(kmemtrace_kfree, NULL);
	if (err)
		return err;
	err = register_trace_kmem_cache_free(kmemtrace_kmem_cache_free, NULL);

	return err;
}

static void kmemtrace_stop_probes(void)
{
	unregister_trace_kmalloc(kmemtrace_kmalloc, NULL);
	unregister_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc, NULL);
	unregister_trace_kmalloc_node(kmemtrace_kmalloc_node, NULL);
	unregister_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node, NULL);
	unregister_trace_kfree(kmemtrace_kfree, NULL);
	unregister_trace_kmem_cache_free(kmemtrace_kmem_cache_free, NULL);
}
static int kmem_trace_init(struct trace_array *tr)
{
	kmemtrace_array = tr;
	tracing_reset_online_cpus(tr);

	kmemtrace_start_probes();

	return 0;
}

static void kmem_trace_reset(struct trace_array *tr)
{
	kmemtrace_stop_probes();
}

static void kmemtrace_headers(struct seq_file *s)
{
	/* Don't need headers for the original kmemtrace output */
	if (!(kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL))
		return;

	seq_printf(s, "#\n");
	seq_printf(s, "# ALLOC  TYPE  REQ   GIVEN  FLAGS     "
		      "      POINTER  NODE    CALLER\n");
	seq_printf(s, "# FREE   |     |     |      |        "
		      "        |        |       |\n");
	seq_printf(s, "# |\n\n");
}
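
/*
 * Purely illustrative (not captured) sketch of what a minimalistic
 * alloc/free pair might look like, given the format strings in the
 * *_compress printers below; the pointer and caller are hypothetical:
 *
 *	 + K   64   64 000080d0 0xffff88003c4b2140   -1 seq_read
 *	 - K   0xffff88003c4b2140  seq_release
 */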
/*
 * The following functions give the original output from kmemtrace,
 * plus the origin CPU, since reordering occurs in-kernel now.
 */

#define KMEMTRACE_USER_ALLOC	0
#define KMEMTRACE_USER_FREE	1

struct kmemtrace_user_event {
	u8		event_id;
	u8		type_id;
	u16		event_size;
	u32		cpu;
	u64		timestamp;
	unsigned long	call_site;
	unsigned long	ptr;
};

struct kmemtrace_user_event_alloc {
	size_t		bytes_req;
	size_t		bytes_alloc;
	unsigned	gfp_flags;
	int		node;
};
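
/*
 * Together these two structs define the binary record layout handed
 * to userspace readers: an alloc record is a kmemtrace_user_event
 * immediately followed by a kmemtrace_user_event_alloc (event_size
 * covers both), while a free record is the bare kmemtrace_user_event.
 */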
static enum print_line_t
kmemtrace_print_alloc(struct trace_iterator *iter, int flags,
		      struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;
	struct kmemtrace_alloc_entry *entry;
	int ret;

	trace_assign_type(entry, iter->ent);

	ret = trace_seq_printf(s, "type_id %d call_site %pF ptr %lu "
			       "bytes_req %lu bytes_alloc %lu gfp_flags %lu node %d\n",
			       entry->type_id, (void *)entry->call_site,
			       (unsigned long)entry->ptr,
			       (unsigned long)entry->bytes_req,
			       (unsigned long)entry->bytes_alloc,
			       (unsigned long)entry->gfp_flags, entry->node);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
kmemtrace_print_free(struct trace_iterator *iter, int flags,
		     struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;
	struct kmemtrace_free_entry *entry;
	int ret;

	trace_assign_type(entry, iter->ent);

	ret = trace_seq_printf(s, "type_id %d call_site %pF ptr %lu\n",
			       entry->type_id, (void *)entry->call_site,
			       (unsigned long)entry->ptr);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
static enum print_line_t
kmemtrace_print_alloc_user(struct trace_iterator *iter, int flags,
			   struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;
	struct kmemtrace_alloc_entry *entry;
	struct kmemtrace_user_event *ev;
	struct kmemtrace_user_event_alloc *ev_alloc;

	trace_assign_type(entry, iter->ent);

	ev = trace_seq_reserve(s, sizeof(*ev));
	if (!ev)
		return TRACE_TYPE_PARTIAL_LINE;

	ev->event_id		= KMEMTRACE_USER_ALLOC;
	ev->type_id		= entry->type_id;
	ev->event_size		= sizeof(*ev) + sizeof(*ev_alloc);
	ev->cpu			= iter->cpu;
	ev->timestamp		= iter->ts;
	ev->call_site		= entry->call_site;
	ev->ptr			= (unsigned long)entry->ptr;

	ev_alloc = trace_seq_reserve(s, sizeof(*ev_alloc));
	if (!ev_alloc)
		return TRACE_TYPE_PARTIAL_LINE;

	ev_alloc->bytes_req	= entry->bytes_req;
	ev_alloc->bytes_alloc	= entry->bytes_alloc;
	ev_alloc->gfp_flags	= entry->gfp_flags;
	ev_alloc->node		= entry->node;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
kmemtrace_print_free_user(struct trace_iterator *iter, int flags,
			  struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;
	struct kmemtrace_free_entry *entry;
	struct kmemtrace_user_event *ev;

	trace_assign_type(entry, iter->ent);

	ev = trace_seq_reserve(s, sizeof(*ev));
	if (!ev)
		return TRACE_TYPE_PARTIAL_LINE;

	ev->event_id		= KMEMTRACE_USER_FREE;
	ev->type_id		= entry->type_id;
	ev->event_size		= sizeof(*ev);
	ev->cpu			= iter->cpu;
	ev->timestamp		= iter->ts;
	ev->call_site		= entry->call_site;
	ev->ptr			= (unsigned long)entry->ptr;

	return TRACE_TYPE_HANDLED;
}
/* The two following functions provide a more minimalistic output */
static enum print_line_t
kmemtrace_print_alloc_compress(struct trace_iterator *iter)
{
	struct kmemtrace_alloc_entry *entry;
	struct trace_seq *s = &iter->seq;
	int ret;

	trace_assign_type(entry, iter->ent);

	/* Alloc entry */
	ret = trace_seq_printf(s, " + ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Type */
	switch (entry->type_id) {
	case KMEMTRACE_TYPE_KMALLOC:
		ret = trace_seq_printf(s, "K ");
		break;
	case KMEMTRACE_TYPE_CACHE:
		ret = trace_seq_printf(s, "C ");
		break;
	case KMEMTRACE_TYPE_PAGES:
		ret = trace_seq_printf(s, "P ");
		break;
	default:
		ret = trace_seq_printf(s, "? ");
	}

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Requested */
	ret = trace_seq_printf(s, "%4zu ", entry->bytes_req);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Allocated */
	ret = trace_seq_printf(s, "%4zu ", entry->bytes_alloc);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * Flags
	 * TODO: would be better to print the names of the GFP flags
	 */
	ret = trace_seq_printf(s, "%08x ", entry->gfp_flags);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Pointer to allocated */
	ret = trace_seq_printf(s, "0x%tx ", (ptrdiff_t)entry->ptr);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Node and call site */
	ret = trace_seq_printf(s, "%4d %pf\n", entry->node,
			       (void *)entry->call_site);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
static enum print_line_t
kmemtrace_print_free_compress(struct trace_iterator *iter)
{
	struct kmemtrace_free_entry *entry;
	struct trace_seq *s = &iter->seq;
	int ret;

	trace_assign_type(entry, iter->ent);

	/* Free entry */
	ret = trace_seq_printf(s, " - ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Type */
	switch (entry->type_id) {
	case KMEMTRACE_TYPE_KMALLOC:
		ret = trace_seq_printf(s, "K ");
		break;
	case KMEMTRACE_TYPE_CACHE:
		ret = trace_seq_printf(s, "C ");
		break;
	case KMEMTRACE_TYPE_PAGES:
		ret = trace_seq_printf(s, "P ");
		break;
	default:
		ret = trace_seq_printf(s, "? ");
	}

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Skip requested/allocated/flags */
	ret = trace_seq_printf(s, " ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Pointer to allocated */
	ret = trace_seq_printf(s, "0x%tx ", (ptrdiff_t)entry->ptr);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Skip node and print call site */
	ret = trace_seq_printf(s, " %pf\n", (void *)entry->call_site);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
static enum print_line_t kmemtrace_print_line(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;

	if (!(kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL))
		return TRACE_TYPE_UNHANDLED;

	switch (entry->type) {
	case TRACE_KMEM_ALLOC:
		return kmemtrace_print_alloc_compress(iter);
	case TRACE_KMEM_FREE:
		return kmemtrace_print_free_compress(iter);
	default:
		return TRACE_TYPE_UNHANDLED;
	}
}
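
/*
 * Returning TRACE_TYPE_UNHANDLED above hands the entry back to the
 * generic output path, which then dispatches through the trace_event
 * funcs registered below (text via .trace, binary via .binary).
 */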
static struct trace_event_functions kmem_trace_alloc_funcs = {
	.trace			= kmemtrace_print_alloc,
	.binary			= kmemtrace_print_alloc_user,
};

static struct trace_event kmem_trace_alloc = {
	.type			= TRACE_KMEM_ALLOC,
	.funcs			= &kmem_trace_alloc_funcs,
};

static struct trace_event_functions kmem_trace_free_funcs = {
	.trace			= kmemtrace_print_free,
	.binary			= kmemtrace_print_free_user,
};

static struct trace_event kmem_trace_free = {
	.type			= TRACE_KMEM_FREE,
	.funcs			= &kmem_trace_free_funcs,
};

static struct tracer kmem_tracer __read_mostly = {
	.name			= "kmemtrace",
	.init			= kmem_trace_init,
	.reset			= kmem_trace_reset,
	.print_line		= kmemtrace_print_line,
	.print_header		= kmemtrace_headers,
	.flags			= &kmem_tracer_flags
};
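
/*
 * Usage sketch (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *	echo kmemtrace > /sys/kernel/debug/tracing/current_tracer
 *	cat /sys/kernel/debug/tracing/trace
 *
 * The compact output is toggled through the tracer flag defined at
 * the top of this file:
 *
 *	echo kmem_minimalistic > /sys/kernel/debug/tracing/trace_options
 */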
void kmemtrace_init(void)
{
	/* earliest opportunity to start kmem tracing */
}

static int __init init_kmem_tracer(void)
{
	if (!register_ftrace_event(&kmem_trace_alloc)) {
		pr_warning("Warning: could not register kmem events\n");
		return 1;
	}

	if (!register_ftrace_event(&kmem_trace_free)) {
		pr_warning("Warning: could not register kmem events\n");
		return 1;
	}

	if (register_tracer(&kmem_tracer) != 0) {
		pr_warning("Warning: could not register the kmem tracer\n");
		return 1;
	}

	return 0;
}
device_initcall(init_kmem_tracer);