kmemtrace.c

/*
 * Memory allocator tracing
 *
 * Copyright (C) 2008 Eduard - Gabriel Munteanu
 * Copyright (C) 2008 Pekka Enberg <penberg@cs.helsinki.fi>
 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/tracepoint.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/dcache.h>
#include <linux/fs.h>

#include <linux/kmemtrace.h>

#include "trace_output.h"
#include "trace.h"

/* Select an alternative, minimalistic output rather than the original one */
#define TRACE_KMEM_OPT_MINIMAL	0x1

static struct tracer_opt kmem_opts[] = {
	/* The minimalistic output is disabled by default */
	{ TRACER_OPT(kmem_minimalistic, TRACE_KMEM_OPT_MINIMAL) },
	{ }
};

static struct tracer_flags kmem_tracer_flags = {
	.val	= 0,
	.opts	= kmem_opts
};
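
/*
 * The kmem_minimalistic option is toggled like any other tracer option,
 * typically through the ftrace debugfs interface (the mount point may
 * vary by configuration):
 *
 *	echo kmem_minimalistic > /sys/kernel/debug/tracing/trace_options
 */
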
static struct trace_array	*kmemtrace_array;

/*
 * Trace allocations: reserve an event in the ring buffer, fill in an
 * allocation entry and commit it, unless the event filter discards it.
 */
static inline void kmemtrace_alloc(enum kmemtrace_type_id type_id,
				   unsigned long call_site,
				   const void *ptr,
				   size_t bytes_req,
				   size_t bytes_alloc,
				   gfp_t gfp_flags,
				   int node)
{
	struct ftrace_event_call *call = &event_kmem_alloc;
	struct trace_array *tr = kmemtrace_array;
	struct kmemtrace_alloc_entry *entry;
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, 0, 0);

	entry->ent.type		= TRACE_KMEM_ALLOC;
	entry->type_id		= type_id;
	entry->call_site	= call_site;
	entry->ptr		= ptr;
	entry->bytes_req	= bytes_req;
	entry->bytes_alloc	= bytes_alloc;
	entry->gfp_flags	= gfp_flags;
	entry->node		= node;

	if (!filter_check_discard(call, entry, tr->buffer, event))
		ring_buffer_unlock_commit(tr->buffer, event);

	trace_wake_up();
}
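
/* Trace frees: same path as kmemtrace_alloc(), minus size/flags/node data */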
static inline void kmemtrace_free(enum kmemtrace_type_id type_id,
				  unsigned long call_site,
				  const void *ptr)
{
	struct ftrace_event_call *call = &event_kmem_free;
	struct trace_array *tr = kmemtrace_array;
	struct kmemtrace_free_entry *entry;
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, 0, 0);

	entry->ent.type		= TRACE_KMEM_FREE;
	entry->type_id		= type_id;
	entry->call_site	= call_site;
	entry->ptr		= ptr;

	if (!filter_check_discard(call, entry, tr->buffer, event))
		ring_buffer_unlock_commit(tr->buffer, event);

	trace_wake_up();
}
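
/*
 * Probe handlers attached to the allocator tracepoints. Each one adapts
 * its tracepoint's arguments to kmemtrace_alloc()/kmemtrace_free(); the
 * variants without a _node suffix pass -1 for the NUMA node.
 */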
static void kmemtrace_kmalloc(unsigned long call_site,
			      const void *ptr,
			      size_t bytes_req,
			      size_t bytes_alloc,
			      gfp_t gfp_flags)
{
	kmemtrace_alloc(KMEMTRACE_TYPE_KMALLOC, call_site, ptr,
			bytes_req, bytes_alloc, gfp_flags, -1);
}

static void kmemtrace_kmem_cache_alloc(unsigned long call_site,
				       const void *ptr,
				       size_t bytes_req,
				       size_t bytes_alloc,
				       gfp_t gfp_flags)
{
	kmemtrace_alloc(KMEMTRACE_TYPE_CACHE, call_site, ptr,
			bytes_req, bytes_alloc, gfp_flags, -1);
}

static void kmemtrace_kmalloc_node(unsigned long call_site,
				   const void *ptr,
				   size_t bytes_req,
				   size_t bytes_alloc,
				   gfp_t gfp_flags,
				   int node)
{
	kmemtrace_alloc(KMEMTRACE_TYPE_KMALLOC, call_site, ptr,
			bytes_req, bytes_alloc, gfp_flags, node);
}

static void kmemtrace_kmem_cache_alloc_node(unsigned long call_site,
					    const void *ptr,
					    size_t bytes_req,
					    size_t bytes_alloc,
					    gfp_t gfp_flags,
					    int node)
{
	kmemtrace_alloc(KMEMTRACE_TYPE_CACHE, call_site, ptr,
			bytes_req, bytes_alloc, gfp_flags, node);
}

static void kmemtrace_kfree(unsigned long call_site, const void *ptr)
{
	kmemtrace_free(KMEMTRACE_TYPE_KMALLOC, call_site, ptr);
}

static void kmemtrace_kmem_cache_free(unsigned long call_site, const void *ptr)
{
	kmemtrace_free(KMEMTRACE_TYPE_CACHE, call_site, ptr);
}
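
/*
 * Register a probe on every allocator tracepoint. Note that if a later
 * registration fails, the probes registered before it stay attached
 * until kmemtrace_stop_probes() runs.
 */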
static int kmemtrace_start_probes(void)
{
	int err;

	err = register_trace_kmalloc(kmemtrace_kmalloc);
	if (err)
		return err;
	err = register_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc);
	if (err)
		return err;
	err = register_trace_kmalloc_node(kmemtrace_kmalloc_node);
	if (err)
		return err;
	err = register_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node);
	if (err)
		return err;
	err = register_trace_kfree(kmemtrace_kfree);
	if (err)
		return err;
	err = register_trace_kmem_cache_free(kmemtrace_kmem_cache_free);

	return err;
}

static void kmemtrace_stop_probes(void)
{
	unregister_trace_kmalloc(kmemtrace_kmalloc);
	unregister_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc);
	unregister_trace_kmalloc_node(kmemtrace_kmalloc_node);
	unregister_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node);
	unregister_trace_kfree(kmemtrace_kfree);
	unregister_trace_kmem_cache_free(kmemtrace_kmem_cache_free);
}
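
/*
 * Called by the tracing core when this tracer is selected (e.g. through
 * the current_tracer debugfs file): reset the per-cpu buffers, then
 * attach the allocator probes.
 */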
static int kmem_trace_init(struct trace_array *tr)
{
	int cpu;

	kmemtrace_array = tr;

	for_each_cpu(cpu, cpu_possible_mask)
		tracing_reset(tr, cpu);

	/* Propagate a probe-registration failure instead of ignoring it */
	return kmemtrace_start_probes();
}

static void kmem_trace_reset(struct trace_array *tr)
{
	kmemtrace_stop_probes();
}

static void kmemtrace_headers(struct seq_file *s)
{
	/* Don't need headers for the original kmemtrace output */
	if (!(kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL))
		return;

	seq_printf(s, "#\n");
	seq_printf(s, "# ALLOC  TYPE  REQ   GIVEN  FLAGS    "
			"     POINTER  NODE    CALLER\n");
	seq_printf(s, "# FREE   |     |     |      |        "
			"     |        |       |\n");
	seq_printf(s, "# |\n\n");
}

/*
 * The following functions give the original output from kmemtrace,
 * plus the origin CPU, since reordering occurs in-kernel now.
 */

#define KMEMTRACE_USER_ALLOC	0
#define KMEMTRACE_USER_FREE	1

struct kmemtrace_user_event {
	u8			event_id;
	u8			type_id;
	u16			event_size;
	u32			cpu;
	u64			timestamp;
	unsigned long		call_site;
	unsigned long		ptr;
};

struct kmemtrace_user_event_alloc {
	size_t			bytes_req;
	size_t			bytes_alloc;
	unsigned		gfp_flags;
	int			node;
};
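
/*
 * Binary layout as emitted to userspace: each record begins with a
 * struct kmemtrace_user_event; for allocations it is immediately
 * followed by a struct kmemtrace_user_event_alloc, and event_size
 * covers both parts.
 */
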
static enum print_line_t
kmemtrace_print_alloc_user(struct trace_iterator *iter, int flags)
{
	struct trace_seq *s = &iter->seq;
	struct kmemtrace_alloc_entry *entry;
	int ret;

	trace_assign_type(entry, iter->ent);

	ret = trace_seq_printf(s, "type_id %d call_site %pF ptr %lu "
			"bytes_req %lu bytes_alloc %lu gfp_flags %lu node %d\n",
			entry->type_id, (void *)entry->call_site,
			(unsigned long)entry->ptr,
			(unsigned long)entry->bytes_req,
			(unsigned long)entry->bytes_alloc,
			(unsigned long)entry->gfp_flags, entry->node);

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
kmemtrace_print_free_user(struct trace_iterator *iter, int flags)
{
	struct trace_seq *s = &iter->seq;
	struct kmemtrace_free_entry *entry;
	int ret;

	trace_assign_type(entry, iter->ent);

	ret = trace_seq_printf(s, "type_id %d call_site %pF ptr %lu\n",
			entry->type_id, (void *)entry->call_site,
			(unsigned long)entry->ptr);

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
	return TRACE_TYPE_HANDLED;
}
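
/*
 * The _bin variants emit the packed structures above instead of text;
 * they back the trace_event ->binary callback, used when binary output
 * is requested (e.g. via the "bin" trace option).
 */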
static enum print_line_t
kmemtrace_print_alloc_user_bin(struct trace_iterator *iter, int flags)
{
	struct trace_seq *s = &iter->seq;
	struct kmemtrace_alloc_entry *entry;
	struct kmemtrace_user_event *ev;
	struct kmemtrace_user_event_alloc *ev_alloc;

	trace_assign_type(entry, iter->ent);

	ev = trace_seq_reserve(s, sizeof(*ev));
	if (!ev)
		return TRACE_TYPE_PARTIAL_LINE;

	ev->event_id		= KMEMTRACE_USER_ALLOC;
	ev->type_id		= entry->type_id;
	ev->event_size		= sizeof(*ev) + sizeof(*ev_alloc);
	ev->cpu			= iter->cpu;
	ev->timestamp		= iter->ts;
	ev->call_site		= entry->call_site;
	ev->ptr			= (unsigned long)entry->ptr;

	ev_alloc = trace_seq_reserve(s, sizeof(*ev_alloc));
	if (!ev_alloc)
		return TRACE_TYPE_PARTIAL_LINE;

	ev_alloc->bytes_req	= entry->bytes_req;
	ev_alloc->bytes_alloc	= entry->bytes_alloc;
	ev_alloc->gfp_flags	= entry->gfp_flags;
	ev_alloc->node		= entry->node;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
kmemtrace_print_free_user_bin(struct trace_iterator *iter, int flags)
{
	struct trace_seq *s = &iter->seq;
	struct kmemtrace_free_entry *entry;
	struct kmemtrace_user_event *ev;

	trace_assign_type(entry, iter->ent);

	ev = trace_seq_reserve(s, sizeof(*ev));
	if (!ev)
		return TRACE_TYPE_PARTIAL_LINE;

	ev->event_id		= KMEMTRACE_USER_FREE;
	ev->type_id		= entry->type_id;
	ev->event_size		= sizeof(*ev);
	ev->cpu			= iter->cpu;
	ev->timestamp		= iter->ts;
	ev->call_site		= entry->call_site;
	ev->ptr			= (unsigned long)entry->ptr;

	return TRACE_TYPE_HANDLED;
}

/* The two functions below provide a more minimalistic output */
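/*
 * Illustrative minimal output (values made up), one line per event:
 *
 *	 + C   24   32 000080d0 0xffff88003c4acf00   -1 kmem_cache_alloc
 *	 - C           0xffff88003c4acf00                kmem_cache_free
 */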
static enum print_line_t
kmemtrace_print_alloc_compress(struct trace_iterator *iter)
{
	struct kmemtrace_alloc_entry *entry;
	struct trace_seq *s = &iter->seq;
	int ret;

	trace_assign_type(entry, iter->ent);

	/* Alloc entry */
	ret = trace_seq_printf(s, " + ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Type */
	switch (entry->type_id) {
	case KMEMTRACE_TYPE_KMALLOC:
		ret = trace_seq_printf(s, "K ");
		break;
	case KMEMTRACE_TYPE_CACHE:
		ret = trace_seq_printf(s, "C ");
		break;
	case KMEMTRACE_TYPE_PAGES:
		ret = trace_seq_printf(s, "P ");
		break;
	default:
		ret = trace_seq_printf(s, "? ");
	}

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Requested */
	ret = trace_seq_printf(s, "%4zu ", entry->bytes_req);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Allocated */
	ret = trace_seq_printf(s, "%4zu ", entry->bytes_alloc);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * Flags
	 * TODO: would be better to print the names of the GFP flags
	 */
	ret = trace_seq_printf(s, "%08x ", entry->gfp_flags);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Pointer to allocated */
	ret = trace_seq_printf(s, "0x%tx ", (ptrdiff_t)entry->ptr);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Node */
	ret = trace_seq_printf(s, "%4d ", entry->node);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Call site */
	ret = seq_print_ip_sym(s, entry->call_site, 0);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	if (!trace_seq_printf(s, "\n"))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
kmemtrace_print_free_compress(struct trace_iterator *iter)
{
	struct kmemtrace_free_entry *entry;
	struct trace_seq *s = &iter->seq;
	int ret;

	trace_assign_type(entry, iter->ent);

	/* Free entry */
	ret = trace_seq_printf(s, " - ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Type */
	switch (entry->type_id) {
	case KMEMTRACE_TYPE_KMALLOC:
		ret = trace_seq_printf(s, "K ");
		break;
	case KMEMTRACE_TYPE_CACHE:
		ret = trace_seq_printf(s, "C ");
		break;
	case KMEMTRACE_TYPE_PAGES:
		ret = trace_seq_printf(s, "P ");
		break;
	default:
		ret = trace_seq_printf(s, "? ");
	}

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Skip requested/allocated/flags */
	ret = trace_seq_printf(s, " ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Pointer to allocated */
	ret = trace_seq_printf(s, "0x%tx ", (ptrdiff_t)entry->ptr);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Skip node */
	ret = trace_seq_printf(s, " ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Call site */
	ret = seq_print_ip_sym(s, entry->call_site, 0);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	if (!trace_seq_printf(s, "\n"))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
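
/*
 * Returning TRACE_TYPE_UNHANDLED hands the entry back to the tracing
 * core, which falls through to the trace_event printers registered
 * below.
 */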
static enum print_line_t kmemtrace_print_line(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;

	if (!(kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL))
		return TRACE_TYPE_UNHANDLED;

	switch (entry->type) {
	case TRACE_KMEM_ALLOC:
		return kmemtrace_print_alloc_compress(iter);
	case TRACE_KMEM_FREE:
		return kmemtrace_print_free_compress(iter);
	default:
		return TRACE_TYPE_UNHANDLED;
	}
}

static struct trace_event kmem_trace_alloc = {
	.type		= TRACE_KMEM_ALLOC,
	.trace		= kmemtrace_print_alloc_user,
	.binary		= kmemtrace_print_alloc_user_bin,
};

static struct trace_event kmem_trace_free = {
	.type		= TRACE_KMEM_FREE,
	.trace		= kmemtrace_print_free_user,
	.binary		= kmemtrace_print_free_user_bin,
};

static struct tracer kmem_tracer __read_mostly = {
	.name		= "kmemtrace",
	.init		= kmem_trace_init,
	.reset		= kmem_trace_reset,
	.print_line	= kmemtrace_print_line,
	.print_header	= kmemtrace_headers,
	.flags		= &kmem_tracer_flags
};

void kmemtrace_init(void)
{
	/* earliest opportunity to start kmem tracing */
}
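
/*
 * register_ftrace_event() returns the assigned event type on success
 * and 0 on failure, so !register_ftrace_event() is a failure check.
 * register_tracer() follows the usual convention and returns 0 on
 * success, hence the different test below.
 */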
static int __init init_kmem_tracer(void)
{
	if (!register_ftrace_event(&kmem_trace_alloc)) {
		pr_warning("Warning: could not register kmem events\n");
		return 1;
	}

	if (!register_ftrace_event(&kmem_trace_free)) {
		pr_warning("Warning: could not register kmem events\n");
		return 1;
	}

	if (register_tracer(&kmem_tracer) != 0) {
		pr_warning("Warning: could not register the kmem tracer\n");
		return 1;
	}

	return 0;
}

device_initcall(init_kmem_tracer);