/*
 * Memory allocator tracing
 *
 * Copyright (C) 2008 Eduard - Gabriel Munteanu
 * Copyright (C) 2008 Pekka Enberg <penberg@cs.helsinki.fi>
 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/tracepoint.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/dcache.h>
#include <linux/fs.h>

#include <linux/kmemtrace.h>

#include "trace_output.h"
#include "trace.h"
/* Select an alternative, minimalistic output instead of the original one */
#define TRACE_KMEM_OPT_MINIMAL	0x1

static struct tracer_opt kmem_opts[] = {
	/* Disable the minimalistic output by default */
	{ TRACER_OPT(kmem_minimalistic, TRACE_KMEM_OPT_MINIMAL) },
	{ }
};

static struct tracer_flags kmem_tracer_flags = {
	.val	= 0,
	.opts	= kmem_opts
};

static struct trace_array *kmemtrace_array;

/* Trace allocations */
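/*
 * Core helper shared by all allocation probes below: reserve an entry
 * in the ftrace ring buffer, fill in the allocation details and commit
 * the event, unless the per-event filter asks for it to be discarded.
 */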
static inline void kmemtrace_alloc(enum kmemtrace_type_id type_id,
				   unsigned long call_site,
				   const void *ptr,
				   size_t bytes_req,
				   size_t bytes_alloc,
				   gfp_t gfp_flags,
				   int node)
{
	struct ftrace_event_call *call = &event_kmem_alloc;
	struct trace_array *tr = kmemtrace_array;
	struct kmemtrace_alloc_entry *entry;
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, 0, 0);

	entry->ent.type		= TRACE_KMEM_ALLOC;
	entry->type_id		= type_id;
	entry->call_site	= call_site;
	entry->ptr		= ptr;
	entry->bytes_req	= bytes_req;
	entry->bytes_alloc	= bytes_alloc;
	entry->gfp_flags	= gfp_flags;
	entry->node		= node;

	if (!filter_check_discard(call, entry, tr->buffer, event))
		ring_buffer_unlock_commit(tr->buffer, event);

	trace_wake_up();
}
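
/*
 * Counterpart of kmemtrace_alloc for free events: only the type id,
 * call site and freed pointer are recorded.
 */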
static inline void kmemtrace_free(enum kmemtrace_type_id type_id,
				  unsigned long call_site,
				  const void *ptr)
{
	struct ftrace_event_call *call = &event_kmem_free;
	struct trace_array *tr = kmemtrace_array;
	struct kmemtrace_free_entry *entry;
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, 0, 0);

	entry->ent.type		= TRACE_KMEM_FREE;
	entry->type_id		= type_id;
	entry->call_site	= call_site;
	entry->ptr		= ptr;

	if (!filter_check_discard(call, entry, tr->buffer, event))
		ring_buffer_unlock_commit(tr->buffer, event);

	trace_wake_up();
}
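
/*
 * Tracepoint probes. Each one adapts the signature of its tracepoint to
 * the two helpers above, tagging the event as a kmalloc or a kmem_cache
 * allocation and passing -1 as the node when no NUMA node was requested.
 */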
static void kmemtrace_kmalloc(unsigned long call_site,
			      const void *ptr,
			      size_t bytes_req,
			      size_t bytes_alloc,
			      gfp_t gfp_flags)
{
	kmemtrace_alloc(KMEMTRACE_TYPE_KMALLOC, call_site, ptr,
			bytes_req, bytes_alloc, gfp_flags, -1);
}

static void kmemtrace_kmem_cache_alloc(unsigned long call_site,
				       const void *ptr,
				       size_t bytes_req,
				       size_t bytes_alloc,
				       gfp_t gfp_flags)
{
	kmemtrace_alloc(KMEMTRACE_TYPE_CACHE, call_site, ptr,
			bytes_req, bytes_alloc, gfp_flags, -1);
}

static void kmemtrace_kmalloc_node(unsigned long call_site,
				   const void *ptr,
				   size_t bytes_req,
				   size_t bytes_alloc,
				   gfp_t gfp_flags,
				   int node)
{
	kmemtrace_alloc(KMEMTRACE_TYPE_KMALLOC, call_site, ptr,
			bytes_req, bytes_alloc, gfp_flags, node);
}

static void kmemtrace_kmem_cache_alloc_node(unsigned long call_site,
					    const void *ptr,
					    size_t bytes_req,
					    size_t bytes_alloc,
					    gfp_t gfp_flags,
					    int node)
{
	kmemtrace_alloc(KMEMTRACE_TYPE_CACHE, call_site, ptr,
			bytes_req, bytes_alloc, gfp_flags, node);
}

static void kmemtrace_kfree(unsigned long call_site, const void *ptr)
{
	kmemtrace_free(KMEMTRACE_TYPE_KMALLOC, call_site, ptr);
}

static void kmemtrace_kmem_cache_free(unsigned long call_site, const void *ptr)
{
	kmemtrace_free(KMEMTRACE_TYPE_CACHE, call_site, ptr);
}
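
/*
 * Register a probe on each allocator tracepoint. Registration stops at
 * the first failure; note that probes already registered at that point
 * are not unregistered before the error is returned.
 */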
static int kmemtrace_start_probes(void)
{
	int err;

	err = register_trace_kmalloc(kmemtrace_kmalloc);
	if (err)
		return err;
	err = register_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc);
	if (err)
		return err;
	err = register_trace_kmalloc_node(kmemtrace_kmalloc_node);
	if (err)
		return err;
	err = register_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node);
	if (err)
		return err;
	err = register_trace_kfree(kmemtrace_kfree);
	if (err)
		return err;
	err = register_trace_kmem_cache_free(kmemtrace_kmem_cache_free);

	return err;
}

static void kmemtrace_stop_probes(void)
{
	unregister_trace_kmalloc(kmemtrace_kmalloc);
	unregister_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc);
	unregister_trace_kmalloc_node(kmemtrace_kmalloc_node);
	unregister_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node);
	unregister_trace_kfree(kmemtrace_kfree);
	unregister_trace_kmem_cache_free(kmemtrace_kmem_cache_free);
}

static int kmem_trace_init(struct trace_array *tr)
{
	kmemtrace_array = tr;

	tracing_reset_online_cpus(tr);

	kmemtrace_start_probes();

	return 0;
}

static void kmem_trace_reset(struct trace_array *tr)
{
	kmemtrace_stop_probes();
}

static void kmemtrace_headers(struct seq_file *s)
{
	/* Don't need headers for the original kmemtrace output */
	if (!(kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL))
		return;

	seq_printf(s, "#\n");
	seq_printf(s, "# ALLOC  TYPE  REQ   GIVEN  FLAGS     "
			"      POINTER         NODE    CALLER\n");
	seq_printf(s, "# FREE   |      |     |       |       "
			"       |   |            |        |\n");
	seq_printf(s, "# |\n\n");
}

/*
 * The following functions give the original output from kmemtrace,
 * plus the origin CPU, since reordering occurs in-kernel now.
 */

#define KMEMTRACE_USER_ALLOC	0
#define KMEMTRACE_USER_FREE	1
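
/*
 * Packed record layout emitted by the binary printers below (the format
 * read by the userspace kmemtrace tools): an allocation record is a
 * kmemtrace_user_event immediately followed by a
 * kmemtrace_user_event_alloc, with event_size covering both.
 */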
struct kmemtrace_user_event {
	u8		event_id;
	u8		type_id;
	u16		event_size;
	u32		cpu;
	u64		timestamp;
	unsigned long	call_site;
	unsigned long	ptr;
};

struct kmemtrace_user_event_alloc {
	size_t		bytes_req;
	size_t		bytes_alloc;
	unsigned	gfp_flags;
	int		node;
};
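
/*
 * Text printers for the default (non-minimal) output: one line per
 * event, in the original kmemtrace format.
 */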
static enum print_line_t
kmemtrace_print_alloc(struct trace_iterator *iter, int flags)
{
	struct trace_seq *s = &iter->seq;
	struct kmemtrace_alloc_entry *entry;
	int ret;

	trace_assign_type(entry, iter->ent);

	ret = trace_seq_printf(s, "type_id %d call_site %pF ptr %lu "
	    "bytes_req %lu bytes_alloc %lu gfp_flags %lu node %d\n",
	    entry->type_id, (void *)entry->call_site, (unsigned long)entry->ptr,
	    (unsigned long)entry->bytes_req, (unsigned long)entry->bytes_alloc,
	    (unsigned long)entry->gfp_flags, entry->node);

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
kmemtrace_print_free(struct trace_iterator *iter, int flags)
{
	struct trace_seq *s = &iter->seq;
	struct kmemtrace_free_entry *entry;
	int ret;

	trace_assign_type(entry, iter->ent);

	ret = trace_seq_printf(s, "type_id %d call_site %pF ptr %lu\n",
			       entry->type_id, (void *)entry->call_site,
			       (unsigned long)entry->ptr);

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
	return TRACE_TYPE_HANDLED;
}
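
/*
 * Binary printers: reserve raw space in the trace_seq buffer and fill in
 * the packed user-event records defined above instead of formatted text.
 */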
static enum print_line_t
kmemtrace_print_alloc_user(struct trace_iterator *iter, int flags)
{
	struct trace_seq *s = &iter->seq;
	struct kmemtrace_alloc_entry *entry;
	struct kmemtrace_user_event *ev;
	struct kmemtrace_user_event_alloc *ev_alloc;

	trace_assign_type(entry, iter->ent);

	ev = trace_seq_reserve(s, sizeof(*ev));
	if (!ev)
		return TRACE_TYPE_PARTIAL_LINE;

	ev->event_id	= KMEMTRACE_USER_ALLOC;
	ev->type_id	= entry->type_id;
	ev->event_size	= sizeof(*ev) + sizeof(*ev_alloc);
	ev->cpu		= iter->cpu;
	ev->timestamp	= iter->ts;
	ev->call_site	= entry->call_site;
	ev->ptr		= (unsigned long)entry->ptr;

	ev_alloc = trace_seq_reserve(s, sizeof(*ev_alloc));
	if (!ev_alloc)
		return TRACE_TYPE_PARTIAL_LINE;

	ev_alloc->bytes_req	= entry->bytes_req;
	ev_alloc->bytes_alloc	= entry->bytes_alloc;
	ev_alloc->gfp_flags	= entry->gfp_flags;
	ev_alloc->node		= entry->node;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
kmemtrace_print_free_user(struct trace_iterator *iter, int flags)
{
	struct trace_seq *s = &iter->seq;
	struct kmemtrace_free_entry *entry;
	struct kmemtrace_user_event *ev;

	trace_assign_type(entry, iter->ent);

	ev = trace_seq_reserve(s, sizeof(*ev));
	if (!ev)
		return TRACE_TYPE_PARTIAL_LINE;

	ev->event_id	= KMEMTRACE_USER_FREE;
	ev->type_id	= entry->type_id;
	ev->event_size	= sizeof(*ev);
	ev->cpu		= iter->cpu;
	ev->timestamp	= iter->ts;
	ev->call_site	= entry->call_site;
	ev->ptr		= (unsigned long)entry->ptr;

	return TRACE_TYPE_HANDLED;
}

/* The following two functions provide a more minimalistic output */

static enum print_line_t
kmemtrace_print_alloc_compress(struct trace_iterator *iter)
{
	struct kmemtrace_alloc_entry *entry;
	struct trace_seq *s = &iter->seq;
	int ret;

	trace_assign_type(entry, iter->ent);

	/* Alloc entry */
	ret = trace_seq_printf(s, "  +      ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Type */
	switch (entry->type_id) {
	case KMEMTRACE_TYPE_KMALLOC:
		ret = trace_seq_printf(s, "K   ");
		break;
	case KMEMTRACE_TYPE_CACHE:
		ret = trace_seq_printf(s, "C   ");
		break;
	case KMEMTRACE_TYPE_PAGES:
		ret = trace_seq_printf(s, "P   ");
		break;
	default:
		ret = trace_seq_printf(s, "?   ");
	}

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Requested */
	ret = trace_seq_printf(s, "%4zu   ", entry->bytes_req);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Allocated */
	ret = trace_seq_printf(s, "%4zu   ", entry->bytes_alloc);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * Flags
	 * TODO: would be better to print the names of the GFP flags
	 */
	ret = trace_seq_printf(s, "%08x   ", entry->gfp_flags);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Pointer to allocated */
	ret = trace_seq_printf(s, "0x%tx   ", (ptrdiff_t)entry->ptr);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Node and call site */
	ret = trace_seq_printf(s, "%4d   %pf\n", entry->node,
			       (void *)entry->call_site);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
kmemtrace_print_free_compress(struct trace_iterator *iter)
{
	struct kmemtrace_free_entry *entry;
	struct trace_seq *s = &iter->seq;
	int ret;

	trace_assign_type(entry, iter->ent);

	/* Free entry */
	ret = trace_seq_printf(s, "  -      ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Type */
	switch (entry->type_id) {
	case KMEMTRACE_TYPE_KMALLOC:
		ret = trace_seq_printf(s, "K   ");
		break;
	case KMEMTRACE_TYPE_CACHE:
		ret = trace_seq_printf(s, "C   ");
		break;
	case KMEMTRACE_TYPE_PAGES:
		ret = trace_seq_printf(s, "P   ");
		break;
	default:
		ret = trace_seq_printf(s, "?   ");
	}

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Skip requested/allocated/flags */
	ret = trace_seq_printf(s, "                       ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Pointer to allocated */
	ret = trace_seq_printf(s, "0x%tx   ", (ptrdiff_t)entry->ptr);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Skip node and print call site */
	ret = trace_seq_printf(s, "       %pf\n", (void *)entry->call_site);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
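
/*
 * Dispatch hook: handle the event here only when the minimal output
 * option is set; otherwise return TRACE_TYPE_UNHANDLED so the core
 * falls back to the trace_event printers registered below.
 */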
static enum print_line_t kmemtrace_print_line(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;

	if (!(kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL))
		return TRACE_TYPE_UNHANDLED;

	switch (entry->type) {
	case TRACE_KMEM_ALLOC:
		return kmemtrace_print_alloc_compress(iter);
	case TRACE_KMEM_FREE:
		return kmemtrace_print_free_compress(iter);
	default:
		return TRACE_TYPE_UNHANDLED;
	}
}
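
/*
 * Default printers for the two event types: .trace emits the text form,
 * .binary the packed form consumed by the userspace tools.
 */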
static struct trace_event kmem_trace_alloc = {
	.type		= TRACE_KMEM_ALLOC,
	.trace		= kmemtrace_print_alloc,
	.binary		= kmemtrace_print_alloc_user,
};

static struct trace_event kmem_trace_free = {
	.type		= TRACE_KMEM_FREE,
	.trace		= kmemtrace_print_free,
	.binary		= kmemtrace_print_free_user,
};

static struct tracer kmem_tracer __read_mostly = {
	.name		= "kmemtrace",
	.init		= kmem_trace_init,
	.reset		= kmem_trace_reset,
	.print_line	= kmemtrace_print_line,
	.print_header	= kmemtrace_headers,
	.flags		= &kmem_tracer_flags
};

void kmemtrace_init(void)
{
	/* earliest opportunity to start kmem tracing */
}
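
/*
 * register_ftrace_event() returns the assigned event type number, or 0
 * on failure, hence the negated checks below.
 */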
static int __init init_kmem_tracer(void)
{
	if (!register_ftrace_event(&kmem_trace_alloc)) {
		pr_warning("Warning: could not register kmem events\n");
		return 1;
	}

	if (!register_ftrace_event(&kmem_trace_free)) {
		pr_warning("Warning: could not register kmem events\n");
		return 1;
	}

	if (register_tracer(&kmem_tracer) != 0) {
		pr_warning("Warning: could not register the kmem tracer\n");
		return 1;
	}

	return 0;
}

device_initcall(init_kmem_tracer);