kmemtrace.c

/*
 * Memory allocator tracing
 *
 * Copyright (C) 2008 Eduard - Gabriel Munteanu
 * Copyright (C) 2008 Pekka Enberg <penberg@cs.helsinki.fi>
 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/tracepoint.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/dcache.h>
#include <linux/fs.h>

#include <linux/kmemtrace.h>

#include "trace_output.h"
#include "trace.h"

/* Select an alternative, minimalistic output instead of the original one */
#define TRACE_KMEM_OPT_MINIMAL  0x1

static struct tracer_opt kmem_opts[] = {
        /* The minimalistic output is disabled by default */
        { TRACER_OPT(kmem_minimalistic, TRACE_KMEM_OPT_MINIMAL) },
        { }
};

static struct tracer_flags kmem_tracer_flags = {
        .val  = 0,
        .opts = kmem_opts
};

static struct trace_array *kmemtrace_array;
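
/*
 * The record writers below reserve space directly in the ftrace ring
 * buffer, fill in a kmemtrace-specific entry and commit it, unless the
 * event filter decides to discard it.
 */
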
/* Trace allocations */
static inline void kmemtrace_alloc(enum kmemtrace_type_id type_id,
                                   unsigned long call_site,
                                   const void *ptr,
                                   size_t bytes_req,
                                   size_t bytes_alloc,
                                   gfp_t gfp_flags,
                                   int node)
{
        struct ftrace_event_call *call = &event_kmem_alloc;
        struct trace_array *tr = kmemtrace_array;
        struct kmemtrace_alloc_entry *entry;
        struct ring_buffer_event *event;

        event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
        if (!event)
                return;

        entry = ring_buffer_event_data(event);
        tracing_generic_entry_update(&entry->ent, 0, 0);

        entry->ent.type    = TRACE_KMEM_ALLOC;
        entry->type_id     = type_id;
        entry->call_site   = call_site;
        entry->ptr         = ptr;
        entry->bytes_req   = bytes_req;
        entry->bytes_alloc = bytes_alloc;
        entry->gfp_flags   = gfp_flags;
        entry->node        = node;

        if (!filter_check_discard(call, entry, tr->buffer, event))
                ring_buffer_unlock_commit(tr->buffer, event);

        trace_wake_up();
}
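
/* Trace frees */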
static inline void kmemtrace_free(enum kmemtrace_type_id type_id,
                                  unsigned long call_site,
                                  const void *ptr)
{
        struct ftrace_event_call *call = &event_kmem_free;
        struct trace_array *tr = kmemtrace_array;
        struct kmemtrace_free_entry *entry;
        struct ring_buffer_event *event;

        event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
        if (!event)
                return;

        entry = ring_buffer_event_data(event);
        tracing_generic_entry_update(&entry->ent, 0, 0);

        entry->ent.type  = TRACE_KMEM_FREE;
        entry->type_id   = type_id;
        entry->call_site = call_site;
        entry->ptr       = ptr;

        if (!filter_check_discard(call, entry, tr->buffer, event))
                ring_buffer_unlock_commit(tr->buffer, event);

        trace_wake_up();
}
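
/*
 * Tracepoint probes: thin adapters from the allocator tracepoints
 * (kmalloc, kmem_cache_alloc, their _node variants, kfree and
 * kmem_cache_free) to the record writers above. Allocations that carry
 * no NUMA information record node as -1.
 */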
static void kmemtrace_kmalloc(void *ignore,
                              unsigned long call_site,
                              const void *ptr,
                              size_t bytes_req,
                              size_t bytes_alloc,
                              gfp_t gfp_flags)
{
        kmemtrace_alloc(KMEMTRACE_TYPE_KMALLOC, call_site, ptr,
                        bytes_req, bytes_alloc, gfp_flags, -1);
}

static void kmemtrace_kmem_cache_alloc(void *ignore,
                                       unsigned long call_site,
                                       const void *ptr,
                                       size_t bytes_req,
                                       size_t bytes_alloc,
                                       gfp_t gfp_flags)
{
        kmemtrace_alloc(KMEMTRACE_TYPE_CACHE, call_site, ptr,
                        bytes_req, bytes_alloc, gfp_flags, -1);
}

static void kmemtrace_kmalloc_node(void *ignore,
                                   unsigned long call_site,
                                   const void *ptr,
                                   size_t bytes_req,
                                   size_t bytes_alloc,
                                   gfp_t gfp_flags,
                                   int node)
{
        kmemtrace_alloc(KMEMTRACE_TYPE_KMALLOC, call_site, ptr,
                        bytes_req, bytes_alloc, gfp_flags, node);
}

static void kmemtrace_kmem_cache_alloc_node(void *ignore,
                                            unsigned long call_site,
                                            const void *ptr,
                                            size_t bytes_req,
                                            size_t bytes_alloc,
                                            gfp_t gfp_flags,
                                            int node)
{
        kmemtrace_alloc(KMEMTRACE_TYPE_CACHE, call_site, ptr,
                        bytes_req, bytes_alloc, gfp_flags, node);
}

static void
kmemtrace_kfree(void *ignore, unsigned long call_site, const void *ptr)
{
        kmemtrace_free(KMEMTRACE_TYPE_KMALLOC, call_site, ptr);
}

static void kmemtrace_kmem_cache_free(void *ignore,
                                      unsigned long call_site, const void *ptr)
{
        kmemtrace_free(KMEMTRACE_TYPE_CACHE, call_site, ptr);
}
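
/*
 * Attach the probes to the allocator tracepoints. On failure the error
 * is returned immediately; probes registered before the failing one are
 * left in place.
 */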
static int kmemtrace_start_probes(void)
{
        int err;

        err = register_trace_kmalloc(kmemtrace_kmalloc, NULL);
        if (err)
                return err;
        err = register_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc, NULL);
        if (err)
                return err;
        err = register_trace_kmalloc_node(kmemtrace_kmalloc_node, NULL);
        if (err)
                return err;
        err = register_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node, NULL);
        if (err)
                return err;
        err = register_trace_kfree(kmemtrace_kfree, NULL);
        if (err)
                return err;
        err = register_trace_kmem_cache_free(kmemtrace_kmem_cache_free, NULL);

        return err;
}

static void kmemtrace_stop_probes(void)
{
        unregister_trace_kmalloc(kmemtrace_kmalloc, NULL);
        unregister_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc, NULL);
        unregister_trace_kmalloc_node(kmemtrace_kmalloc_node, NULL);
        unregister_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node, NULL);
        unregister_trace_kfree(kmemtrace_kfree, NULL);
        unregister_trace_kmem_cache_free(kmemtrace_kmem_cache_free, NULL);
}
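
/*
 * Called when this tracer is selected, e.g. (assuming debugfs is mounted
 * at /sys/kernel/debug):
 *
 *      echo kmemtrace > /sys/kernel/debug/tracing/current_tracer
 *
 * The per-CPU buffers are reset so the trace starts clean, then the
 * tracepoint probes are attached.
 */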
static int kmem_trace_init(struct trace_array *tr)
{
        kmemtrace_array = tr;

        tracing_reset_online_cpus(tr);

        kmemtrace_start_probes();

        return 0;
}

static void kmem_trace_reset(struct trace_array *tr)
{
        kmemtrace_stop_probes();
}

static void kmemtrace_headers(struct seq_file *s)
{
        /* Don't need headers for the original kmemtrace output */
        if (!(kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL))
                return;

        seq_printf(s, "#\n");
        seq_printf(s, "# ALLOC  TYPE  REQ   GIVEN  FLAGS     "
                      "      POINTER         NODE    CALLER\n");
        seq_printf(s, "#  FREE   |      |     |       |      "
                      "        |       |      |        |\n");
        seq_printf(s, "# |\n\n");
}

/*
 * The following functions give the original output from kmemtrace,
 * plus the origin CPU, since reordering occurs in-kernel now.
 */

#define KMEMTRACE_USER_ALLOC    0
#define KMEMTRACE_USER_FREE     1
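
/*
 * Layout of the binary records handed to userspace readers: an alloc
 * event is a kmemtrace_user_event immediately followed by a
 * kmemtrace_user_event_alloc, while a free event is the bare
 * kmemtrace_user_event; event_size tells the reader how much to consume.
 */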
struct kmemtrace_user_event {
        u8              event_id;
        u8              type_id;
        u16             event_size;
        u32             cpu;
        u64             timestamp;
        unsigned long   call_site;
        unsigned long   ptr;
};

struct kmemtrace_user_event_alloc {
        size_t          bytes_req;
        size_t          bytes_alloc;
        unsigned        gfp_flags;
        int             node;
};

static enum print_line_t
kmemtrace_print_alloc(struct trace_iterator *iter, int flags)
{
        struct trace_seq *s = &iter->seq;
        struct kmemtrace_alloc_entry *entry;
        int ret;

        trace_assign_type(entry, iter->ent);

        ret = trace_seq_printf(s, "type_id %d call_site %pF ptr %lu "
                               "bytes_req %lu bytes_alloc %lu gfp_flags %lu node %d\n",
                               entry->type_id, (void *)entry->call_site,
                               (unsigned long)entry->ptr,
                               (unsigned long)entry->bytes_req,
                               (unsigned long)entry->bytes_alloc,
                               (unsigned long)entry->gfp_flags, entry->node);

        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;
        return TRACE_TYPE_HANDLED;
}

static enum print_line_t
kmemtrace_print_free(struct trace_iterator *iter, int flags)
{
        struct trace_seq *s = &iter->seq;
        struct kmemtrace_free_entry *entry;
        int ret;

        trace_assign_type(entry, iter->ent);

        ret = trace_seq_printf(s, "type_id %d call_site %pF ptr %lu\n",
                               entry->type_id, (void *)entry->call_site,
                               (unsigned long)entry->ptr);

        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;
        return TRACE_TYPE_HANDLED;
}
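
/*
 * Binary output: the *_user variants below serialize the raw user-event
 * structs straight into the trace_seq buffer with trace_seq_reserve()
 * instead of formatting text.
 */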
static enum print_line_t
kmemtrace_print_alloc_user(struct trace_iterator *iter, int flags)
{
        struct trace_seq *s = &iter->seq;
        struct kmemtrace_alloc_entry *entry;
        struct kmemtrace_user_event *ev;
        struct kmemtrace_user_event_alloc *ev_alloc;

        trace_assign_type(entry, iter->ent);

        ev = trace_seq_reserve(s, sizeof(*ev));
        if (!ev)
                return TRACE_TYPE_PARTIAL_LINE;

        ev->event_id   = KMEMTRACE_USER_ALLOC;
        ev->type_id    = entry->type_id;
        ev->event_size = sizeof(*ev) + sizeof(*ev_alloc);
        ev->cpu        = iter->cpu;
        ev->timestamp  = iter->ts;
        ev->call_site  = entry->call_site;
        ev->ptr        = (unsigned long)entry->ptr;

        ev_alloc = trace_seq_reserve(s, sizeof(*ev_alloc));
        if (!ev_alloc)
                return TRACE_TYPE_PARTIAL_LINE;

        ev_alloc->bytes_req   = entry->bytes_req;
        ev_alloc->bytes_alloc = entry->bytes_alloc;
        ev_alloc->gfp_flags   = entry->gfp_flags;
        ev_alloc->node        = entry->node;

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t
kmemtrace_print_free_user(struct trace_iterator *iter, int flags)
{
        struct trace_seq *s = &iter->seq;
        struct kmemtrace_free_entry *entry;
        struct kmemtrace_user_event *ev;

        trace_assign_type(entry, iter->ent);

        ev = trace_seq_reserve(s, sizeof(*ev));
        if (!ev)
                return TRACE_TYPE_PARTIAL_LINE;

        ev->event_id   = KMEMTRACE_USER_FREE;
        ev->type_id    = entry->type_id;
        ev->event_size = sizeof(*ev);
        ev->cpu        = iter->cpu;
        ev->timestamp  = iter->ts;
        ev->call_site  = entry->call_site;
        ev->ptr        = (unsigned long)entry->ptr;

        return TRACE_TYPE_HANDLED;
}

/* The two following functions provide the more minimalistic output */
static enum print_line_t
kmemtrace_print_alloc_compress(struct trace_iterator *iter)
{
        struct kmemtrace_alloc_entry *entry;
        struct trace_seq *s = &iter->seq;
        int ret;

        trace_assign_type(entry, iter->ent);

        /* Alloc entry */
        ret = trace_seq_printf(s, "  +      ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Type */
        switch (entry->type_id) {
        case KMEMTRACE_TYPE_KMALLOC:
                ret = trace_seq_printf(s, "K   ");
                break;
        case KMEMTRACE_TYPE_CACHE:
                ret = trace_seq_printf(s, "C   ");
                break;
        case KMEMTRACE_TYPE_PAGES:
                ret = trace_seq_printf(s, "P   ");
                break;
        default:
                ret = trace_seq_printf(s, "?   ");
        }

        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Requested */
        ret = trace_seq_printf(s, "%4zu   ", entry->bytes_req);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Allocated */
        ret = trace_seq_printf(s, "%4zu   ", entry->bytes_alloc);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /*
         * Flags
         * TODO: it would be better to print the names of the GFP flags
         */
        ret = trace_seq_printf(s, "%08x   ", entry->gfp_flags);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Pointer to allocated */
        ret = trace_seq_printf(s, "0x%tx   ", (ptrdiff_t)entry->ptr);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Node and call site */
        ret = trace_seq_printf(s, "%4d   %pf\n", entry->node,
                               (void *)entry->call_site);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t
kmemtrace_print_free_compress(struct trace_iterator *iter)
{
        struct kmemtrace_free_entry *entry;
        struct trace_seq *s = &iter->seq;
        int ret;

        trace_assign_type(entry, iter->ent);

        /* Free entry */
        ret = trace_seq_printf(s, "  -      ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Type */
        switch (entry->type_id) {
        case KMEMTRACE_TYPE_KMALLOC:
                ret = trace_seq_printf(s, "K   ");
                break;
        case KMEMTRACE_TYPE_CACHE:
                ret = trace_seq_printf(s, "C   ");
                break;
        case KMEMTRACE_TYPE_PAGES:
                ret = trace_seq_printf(s, "P   ");
                break;
        default:
                ret = trace_seq_printf(s, "?   ");
        }

        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Skip requested/allocated/flags */
        ret = trace_seq_printf(s, "                       ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Pointer to allocated */
        ret = trace_seq_printf(s, "0x%tx   ", (ptrdiff_t)entry->ptr);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Skip node and print call site */
        ret = trace_seq_printf(s, "         %pf\n", (void *)entry->call_site);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}
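
/*
 * Dispatch for the minimalistic output; when the option is off, return
 * TRACE_TYPE_UNHANDLED so the core falls back to the trace_event
 * printers registered via kmem_trace_alloc/kmem_trace_free below.
 */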
static enum print_line_t kmemtrace_print_line(struct trace_iterator *iter)
{
        struct trace_entry *entry = iter->ent;

        if (!(kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL))
                return TRACE_TYPE_UNHANDLED;

        switch (entry->type) {
        case TRACE_KMEM_ALLOC:
                return kmemtrace_print_alloc_compress(iter);
        case TRACE_KMEM_FREE:
                return kmemtrace_print_free_compress(iter);
        default:
                return TRACE_TYPE_UNHANDLED;
        }
}

static struct trace_event kmem_trace_alloc = {
        .type   = TRACE_KMEM_ALLOC,
        .trace  = kmemtrace_print_alloc,
        .binary = kmemtrace_print_alloc_user,
};

static struct trace_event kmem_trace_free = {
        .type   = TRACE_KMEM_FREE,
        .trace  = kmemtrace_print_free,
        .binary = kmemtrace_print_free_user,
};

static struct tracer kmem_tracer __read_mostly = {
        .name         = "kmemtrace",
        .init         = kmem_trace_init,
        .reset        = kmem_trace_reset,
        .print_line   = kmemtrace_print_line,
        .print_header = kmemtrace_headers,
        .flags        = &kmem_tracer_flags
};

void kmemtrace_init(void)
{
        /* earliest opportunity to start kmem tracing */
}

static int __init init_kmem_tracer(void)
{
        if (!register_ftrace_event(&kmem_trace_alloc)) {
                pr_warning("Warning: could not register kmem events\n");
                return 1;
        }

        if (!register_ftrace_event(&kmem_trace_free)) {
                pr_warning("Warning: could not register kmem events\n");
                return 1;
        }

        if (register_tracer(&kmem_tracer) != 0) {
                pr_warning("Warning: could not register the kmem tracer\n");
                return 1;
        }

        return 0;
}
device_initcall(init_kmem_tracer);
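
/*
 * Typical usage from userspace, assuming CONFIG_KMEMTRACE=y and debugfs
 * mounted at /sys/kernel/debug:
 *
 *      # select the tracer and read the human-readable output
 *      echo kmemtrace > /sys/kernel/debug/tracing/current_tracer
 *      cat /sys/kernel/debug/tracing/trace
 *
 *      # switch to the compact one-line-per-event format
 *      echo kmem_minimalistic > /sys/kernel/debug/tracing/trace_options
 */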