builtin-kmem.c

#include "builtin.h"
#include "perf.h"

#include "util/evlist.h"
#include "util/evsel.h"
#include "util/util.h"
#include "util/cache.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/session.h"
#include "util/tool.h"

#include "util/parse-options.h"
#include "util/trace-event.h"

#include "util/debug.h"

#include <linux/rbtree.h>
#include <linux/string.h>
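
/*
 * Allocation statistics are aggregated twice: once per object pointer
 * (root_alloc_stat) and once per call site (root_caller_stat).  Both
 * trees hold alloc_stat nodes and are drained into the corresponding
 * *_sorted trees, ordered by the user-selected keys, before printing.
 */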

struct alloc_stat;
typedef int (*sort_fn_t)(struct alloc_stat *, struct alloc_stat *);

static int		alloc_flag;
static int		caller_flag;

static int		alloc_lines = -1;
static int		caller_lines = -1;

static bool		raw_ip;

static int		*cpunode_map;
static int		max_cpu_num;

struct alloc_stat {
	u64	call_site;
	u64	ptr;
	u64	bytes_req;
	u64	bytes_alloc;

	u32	hit;
	u32	pingpong;

	short	alloc_cpu;

	struct rb_node node;
};

static struct rb_root root_alloc_stat;
static struct rb_root root_alloc_sorted;
static struct rb_root root_caller_stat;
static struct rb_root root_caller_sorted;

static unsigned long total_requested, total_allocated;
static unsigned long nr_allocs, nr_cross_allocs;

#define PATH_SYS_NODE	"/sys/devices/system/node"
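
/*
 * Size the cpu -> NUMA node map from the kernel's CPU index limit.
 * kernel_max in sysfs holds the highest possible CPU index, so the
 * map needs kernel_max + 1 slots; fall back to 4096 slots if the
 * file is unreadable.  All CPUs start out mapped to node -1.
 */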
static int init_cpunode_map(void)
{
	FILE *fp;
	int i, err = -1;

	fp = fopen("/sys/devices/system/cpu/kernel_max", "r");
	if (!fp) {
		max_cpu_num = 4096;
	} else {
		if (fscanf(fp, "%d", &max_cpu_num) < 1) {
			pr_err("Failed to read 'kernel_max' from sysfs");
			goto out_close;
		}
		/* kernel_max is the highest valid CPU index, so size is +1 */
		max_cpu_num++;
	}

	/* allocate the map in the fallback path too, not just on success */
	cpunode_map = calloc(max_cpu_num, sizeof(int));
	if (!cpunode_map) {
		pr_err("%s: calloc failed\n", __func__);
		goto out_close;
	}

	for (i = 0; i < max_cpu_num; i++)
		cpunode_map[i] = -1;

	err = 0;
out_close:
	if (fp)
		fclose(fp);
	return err;
}
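
/*
 * Walk the /sys/devices/system/node/nodeN/cpuM symlinks to record
 * which NUMA node each CPU belongs to; CPUs that never appear under
 * any node keep the -1 mapping.
 */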
static int setup_cpunode_map(void)
{
	struct dirent *dent1, *dent2;
	DIR *dir1, *dir2;
	unsigned int cpu, mem;
	char buf[PATH_MAX];

	if (init_cpunode_map())
		return -1;

	dir1 = opendir(PATH_SYS_NODE);
	if (!dir1)
		return -1;

	while ((dent1 = readdir(dir1)) != NULL) {
		if (dent1->d_type != DT_DIR ||
		    sscanf(dent1->d_name, "node%u", &mem) < 1)
			continue;

		snprintf(buf, PATH_MAX, "%s/%s", PATH_SYS_NODE, dent1->d_name);
		dir2 = opendir(buf);
		if (!dir2)
			continue;
		while ((dent2 = readdir(dir2)) != NULL) {
			if (dent2->d_type != DT_LNK ||
			    sscanf(dent2->d_name, "cpu%u", &cpu) < 1)
				continue;
			cpunode_map[cpu] = mem;
		}
		closedir(dir2);
	}
	closedir(dir1);
	return 0;
}
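
/*
 * Account one allocation under its object pointer: an existing node
 * for the same ptr is updated in place, otherwise a fresh node is
 * linked into the (unsorted) pointer-keyed tree.  call_site and
 * alloc_cpu always track the most recent allocation of that pointer.
 */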
static int insert_alloc_stat(unsigned long call_site, unsigned long ptr,
			     int bytes_req, int bytes_alloc, int cpu)
{
	struct rb_node **node = &root_alloc_stat.rb_node;
	struct rb_node *parent = NULL;
	struct alloc_stat *data = NULL;

	while (*node) {
		parent = *node;
		data = rb_entry(*node, struct alloc_stat, node);

		if (ptr > data->ptr)
			node = &(*node)->rb_right;
		else if (ptr < data->ptr)
			node = &(*node)->rb_left;
		else
			break;
	}

	if (data && data->ptr == ptr) {
		data->hit++;
		data->bytes_req += bytes_req;
		data->bytes_alloc += bytes_alloc;
	} else {
		data = malloc(sizeof(*data));
		if (!data) {
			pr_err("%s: malloc failed\n", __func__);
			return -1;
		}
		data->ptr = ptr;
		data->pingpong = 0;
		data->hit = 1;
		data->bytes_req = bytes_req;
		data->bytes_alloc = bytes_alloc;

		rb_link_node(&data->node, parent, node);
		rb_insert_color(&data->node, &root_alloc_stat);
	}
	data->call_site = call_site;
	data->alloc_cpu = cpu;
	return 0;
}
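
/* Like insert_alloc_stat(), but aggregated by call site. */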
static int insert_caller_stat(unsigned long call_site,
			      int bytes_req, int bytes_alloc)
{
	struct rb_node **node = &root_caller_stat.rb_node;
	struct rb_node *parent = NULL;
	struct alloc_stat *data = NULL;

	while (*node) {
		parent = *node;
		data = rb_entry(*node, struct alloc_stat, node);

		if (call_site > data->call_site)
			node = &(*node)->rb_right;
		else if (call_site < data->call_site)
			node = &(*node)->rb_left;
		else
			break;
	}

	if (data && data->call_site == call_site) {
		data->hit++;
		data->bytes_req += bytes_req;
		data->bytes_alloc += bytes_alloc;
	} else {
		data = malloc(sizeof(*data));
		if (!data) {
			pr_err("%s: malloc failed\n", __func__);
			return -1;
		}
		data->call_site = call_site;
		data->pingpong = 0;
		data->hit = 1;
		data->bytes_req = bytes_req;
		data->bytes_alloc = bytes_alloc;

		rb_link_node(&data->node, parent, node);
		rb_insert_color(&data->node, &root_caller_stat);
	}

	return 0;
}
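
/*
 * kmalloc/kmem_cache_alloc handler: update both trees and the global
 * totals.  The _node variant also compares the sampling CPU's node
 * against the node the allocation was requested on, counting any
 * mismatch as a cross-node allocation.
 */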
static int perf_evsel__process_alloc_event(struct perf_evsel *evsel,
					   struct perf_sample *sample)
{
	unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr"),
		      call_site = perf_evsel__intval(evsel, sample, "call_site");
	int bytes_req = perf_evsel__intval(evsel, sample, "bytes_req"),
	    bytes_alloc = perf_evsel__intval(evsel, sample, "bytes_alloc");

	if (insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, sample->cpu) ||
	    insert_caller_stat(call_site, bytes_req, bytes_alloc))
		return -1;

	total_requested += bytes_req;
	total_allocated += bytes_alloc;

	nr_allocs++;
	return 0;
}

static int perf_evsel__process_alloc_node_event(struct perf_evsel *evsel,
						struct perf_sample *sample)
{
	int ret = perf_evsel__process_alloc_event(evsel, sample);

	if (!ret) {
		int node1 = cpunode_map[sample->cpu],
		    node2 = perf_evsel__intval(evsel, sample, "node");

		if (node1 != node2)
			nr_cross_allocs++;
	}

	return ret;
}

static int ptr_cmp(struct alloc_stat *, struct alloc_stat *);
static int callsite_cmp(struct alloc_stat *, struct alloc_stat *);
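
/*
 * Look up an existing stat node; callers select the key by passing
 * ptr_cmp or callsite_cmp together with the matching tree.
 */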
static struct alloc_stat *search_alloc_stat(unsigned long ptr,
					    unsigned long call_site,
					    struct rb_root *root,
					    sort_fn_t sort_fn)
{
	struct rb_node *node = root->rb_node;
	struct alloc_stat key = { .ptr = ptr, .call_site = call_site };

	while (node) {
		struct alloc_stat *data;
		int cmp;

		data = rb_entry(node, struct alloc_stat, node);
		cmp = sort_fn(&key, data);

		if (cmp < 0)
			node = node->rb_left;
		else if (cmp > 0)
			node = node->rb_right;
		else
			return data;
	}
	return NULL;
}
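
/*
 * kfree/kmem_cache_free handler: a free on a CPU other than the
 * allocating one is counted as a "ping-pong" against both the object
 * and its call site, since the object likely bounced between CPU
 * caches.
 */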
static int perf_evsel__process_free_event(struct perf_evsel *evsel,
					  struct perf_sample *sample)
{
	unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr");
	struct alloc_stat *s_alloc, *s_caller;

	s_alloc = search_alloc_stat(ptr, 0, &root_alloc_stat, ptr_cmp);
	if (!s_alloc)
		return 0;

	if ((short)sample->cpu != s_alloc->alloc_cpu) {
		s_alloc->pingpong++;

		s_caller = search_alloc_stat(0, s_alloc->call_site,
					     &root_caller_stat, callsite_cmp);
		if (!s_caller)
			return -1;
		s_caller->pingpong++;
	}
	s_alloc->alloc_cpu = -1;

	return 0;
}
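
/*
 * Generic sample callback: resolve the thread for the debug output,
 * then dispatch the sample to the tracepoint_handler installed on the
 * evsel by __cmd_kmem().
 */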
typedef int (*tracepoint_handler)(struct perf_evsel *evsel,
				  struct perf_sample *sample);

static int process_sample_event(struct perf_tool *tool __maybe_unused,
				union perf_event *event,
				struct perf_sample *sample,
				struct perf_evsel *evsel,
				struct machine *machine)
{
	struct thread *thread = machine__findnew_thread(machine, event->ip.pid);

	if (thread == NULL) {
		pr_debug("problem processing %d event, skipping it.\n",
			 event->header.type);
		return -1;
	}

	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);

	if (evsel->handler.func != NULL) {
		tracepoint_handler f = evsel->handler.func;
		return f(evsel, sample);
	}

	return 0;
}

static struct perf_tool perf_kmem = {
	.sample		 = process_sample_event,
	.comm		 = perf_event__process_comm,
	.ordered_samples = true,
};
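
/* Internal fragmentation: the share of allocated bytes that was not requested. */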
static double fragmentation(unsigned long n_req, unsigned long n_alloc)
{
	if (n_alloc == 0)
		return 0.0;
	else
		return 100.0 - (100.0 * n_req / n_alloc);
}
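
/*
 * Print the top n_lines entries of a sorted tree (n_lines == -1 means
 * all of them).  Call sites are resolved to kernel symbol+offset
 * unless --raw-ip was given.
 */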
static void __print_result(struct rb_root *root, struct perf_session *session,
			   int n_lines, int is_caller)
{
	struct rb_node *next;
	struct machine *machine = &session->machines.host;

	printf("%.102s\n", graph_dotted_line);
	printf(" %-34s |", is_caller ? "Callsite" : "Alloc Ptr");
	printf(" Total_alloc/Per | Total_req/Per   | Hit      | Ping-pong | Frag\n");
	printf("%.102s\n", graph_dotted_line);

	next = rb_first(root);

	while (next && n_lines--) {
		struct alloc_stat *data = rb_entry(next, struct alloc_stat,
						   node);
		struct symbol *sym = NULL;
		struct map *map;
		char buf[BUFSIZ];
		u64 addr;

		if (is_caller) {
			addr = data->call_site;
			if (!raw_ip)
				sym = machine__find_kernel_function(machine, addr, &map, NULL);
		} else
			addr = data->ptr;

		if (sym != NULL)
			snprintf(buf, sizeof(buf), "%s+%" PRIx64 "", sym->name,
				 addr - map->unmap_ip(map, sym->start));
		else
			snprintf(buf, sizeof(buf), "%#" PRIx64 "", addr);
		printf(" %-34s |", buf);

		printf(" %9llu/%-5lu | %9llu/%-5lu | %8lu | %8lu | %6.3f%%\n",
		       (unsigned long long)data->bytes_alloc,
		       (unsigned long)data->bytes_alloc / data->hit,
		       (unsigned long long)data->bytes_req,
		       (unsigned long)data->bytes_req / data->hit,
		       (unsigned long)data->hit,
		       (unsigned long)data->pingpong,
		       fragmentation(data->bytes_req, data->bytes_alloc));

		next = rb_next(next);
	}

	if (n_lines == -1)
		printf(" ...                                | ...             | ...             | ...      | ...       | ...   \n");

	printf("%.102s\n", graph_dotted_line);
}

static void print_summary(void)
{
	printf("\nSUMMARY\n=======\n");
	printf("Total bytes requested: %lu\n", total_requested);
	printf("Total bytes allocated: %lu\n", total_allocated);
	printf("Total bytes wasted on internal fragmentation: %lu\n",
	       total_allocated - total_requested);
	printf("Internal fragmentation: %f%%\n",
	       fragmentation(total_requested, total_allocated));
	printf("Cross CPU allocations: %lu/%lu\n", nr_cross_allocs, nr_allocs);
}

static void print_result(struct perf_session *session)
{
	if (caller_flag)
		__print_result(&root_caller_sorted, session, caller_lines, 1);
	if (alloc_flag)
		__print_result(&root_alloc_sorted, session, alloc_lines, 0);
	print_summary();
}
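
/*
 * User-selectable sort keys: each -s key is copied onto a list and
 * applied in order, so earlier keys dominate and later ones break
 * ties.
 */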
struct sort_dimension {
	const char	name[20];
	sort_fn_t	cmp;
	struct list_head list;
};

static LIST_HEAD(caller_sort);
static LIST_HEAD(alloc_sort);
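
/* Insert into a result tree, largest entries (per the sort keys) leftmost. */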
static void sort_insert(struct rb_root *root, struct alloc_stat *data,
			struct list_head *sort_list)
{
	struct rb_node **new = &(root->rb_node);
	struct rb_node *parent = NULL;
	struct sort_dimension *sort;

	while (*new) {
		struct alloc_stat *this;
		int cmp = 0;

		this = rb_entry(*new, struct alloc_stat, node);
		parent = *new;

		list_for_each_entry(sort, sort_list, list) {
			cmp = sort->cmp(data, this);
			if (cmp)
				break;
		}

		if (cmp > 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);
}

static void __sort_result(struct rb_root *root, struct rb_root *root_sorted,
			  struct list_head *sort_list)
{
	struct rb_node *node;
	struct alloc_stat *data;

	for (;;) {
		node = rb_first(root);
		if (!node)
			break;

		rb_erase(node, root);
		data = rb_entry(node, struct alloc_stat, node);
		sort_insert(root_sorted, data, sort_list);
	}
}

static void sort_result(void)
{
	__sort_result(&root_alloc_stat, &root_alloc_sorted, &alloc_sort);
	__sort_result(&root_caller_stat, &root_caller_sorted, &caller_sort);
}
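
/*
 * 'perf kmem stat': replay the recorded perf.data with each kmem
 * tracepoint wired to its handler, then sort and print the results.
 */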
static int __cmd_kmem(void)
{
	int err = -EINVAL;
	struct perf_session *session;
	const struct perf_evsel_str_handler kmem_tracepoints[] = {
		{ "kmem:kmalloc",		perf_evsel__process_alloc_event, },
		{ "kmem:kmem_cache_alloc",	perf_evsel__process_alloc_event, },
		{ "kmem:kmalloc_node",		perf_evsel__process_alloc_node_event, },
		{ "kmem:kmem_cache_alloc_node", perf_evsel__process_alloc_node_event, },
		{ "kmem:kfree",			perf_evsel__process_free_event, },
		{ "kmem:kmem_cache_free",	perf_evsel__process_free_event, },
	};

	session = perf_session__new(input_name, O_RDONLY, 0, false, &perf_kmem);
	if (session == NULL)
		return -ENOMEM;

	if (perf_session__create_kernel_maps(session) < 0)
		goto out_delete;

	if (!perf_session__has_traces(session, "kmem record"))
		goto out_delete;

	if (perf_session__set_tracepoints_handlers(session, kmem_tracepoints)) {
		pr_err("Initializing perf session tracepoint handlers failed\n");
		goto out_delete;	/* don't leak the session on error */
	}

	setup_pager();
	err = perf_session__process_events(session, &perf_kmem);
	if (err != 0)
		goto out_delete;
	sort_result();
	print_result(session);
out_delete:
	perf_session__delete(session);
	return err;
}

static int ptr_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->ptr < r->ptr)
		return -1;
	else if (l->ptr > r->ptr)
		return 1;
	return 0;
}

static struct sort_dimension ptr_sort_dimension = {
	.name	= "ptr",
	.cmp	= ptr_cmp,
};

static int callsite_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->call_site < r->call_site)
		return -1;
	else if (l->call_site > r->call_site)
		return 1;
	return 0;
}

static struct sort_dimension callsite_sort_dimension = {
	.name	= "callsite",
	.cmp	= callsite_cmp,
};

static int hit_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->hit < r->hit)
		return -1;
	else if (l->hit > r->hit)
		return 1;
	return 0;
}

static struct sort_dimension hit_sort_dimension = {
	.name	= "hit",
	.cmp	= hit_cmp,
};

static int bytes_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->bytes_alloc < r->bytes_alloc)
		return -1;
	else if (l->bytes_alloc > r->bytes_alloc)
		return 1;
	return 0;
}

static struct sort_dimension bytes_sort_dimension = {
	.name	= "bytes",
	.cmp	= bytes_cmp,
};

static int frag_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	double x, y;

	x = fragmentation(l->bytes_req, l->bytes_alloc);
	y = fragmentation(r->bytes_req, r->bytes_alloc);

	if (x < y)
		return -1;
	else if (x > y)
		return 1;
	return 0;
}

static struct sort_dimension frag_sort_dimension = {
	.name	= "frag",
	.cmp	= frag_cmp,
};

static int pingpong_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->pingpong < r->pingpong)
		return -1;
	else if (l->pingpong > r->pingpong)
		return 1;
	return 0;
}

static struct sort_dimension pingpong_sort_dimension = {
	.name	= "pingpong",
	.cmp	= pingpong_cmp,
};

static struct sort_dimension *avail_sorts[] = {
	&ptr_sort_dimension,
	&callsite_sort_dimension,
	&hit_sort_dimension,
	&bytes_sort_dimension,
	&frag_sort_dimension,
	&pingpong_sort_dimension,
};

#define NUM_AVAIL_SORTS	((int)ARRAY_SIZE(avail_sorts))
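
/* Resolve one -s token against avail_sorts[] and queue a copy of it. */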
static int sort_dimension__add(const char *tok, struct list_head *list)
{
	struct sort_dimension *sort;
	int i;

	for (i = 0; i < NUM_AVAIL_SORTS; i++) {
		if (!strcmp(avail_sorts[i]->name, tok)) {
			sort = memdup(avail_sorts[i], sizeof(*avail_sorts[i]));
			if (!sort) {
				pr_err("%s: memdup failed\n", __func__);
				return -1;
			}
			list_add_tail(&sort->list, list);
			return 0;
		}
	}

	return -1;
}
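
/* Split a comma-separated -s argument into sort keys for sort_list. */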
static int setup_sorting(struct list_head *sort_list, const char *arg)
{
	char *tok;
	char *str = strdup(arg);
	char *pos = str;	/* strsep() advances this; keep str for free() */

	if (!str) {
		pr_err("%s: strdup failed\n", __func__);
		return -1;
	}

	while (true) {
		tok = strsep(&pos, ",");
		if (!tok)
			break;
		if (sort_dimension__add(tok, sort_list) < 0) {
			error("Unknown --sort key: '%s'", tok);
			free(str);
			return -1;
		}
	}

	free(str);
	return 0;
}
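
/*
 * --caller and --alloc each raise their flag one above the other's,
 * so whichever appeared last on the command line decides which view
 * subsequent -s and -l options apply to.
 */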
static int parse_sort_opt(const struct option *opt __maybe_unused,
			  const char *arg, int unset __maybe_unused)
{
	if (!arg)
		return -1;

	if (caller_flag > alloc_flag)
		return setup_sorting(&caller_sort, arg);
	else
		return setup_sorting(&alloc_sort, arg);
}

static int parse_caller_opt(const struct option *opt __maybe_unused,
			    const char *arg __maybe_unused,
			    int unset __maybe_unused)
{
	caller_flag = (alloc_flag + 1);
	return 0;
}

static int parse_alloc_opt(const struct option *opt __maybe_unused,
			   const char *arg __maybe_unused,
			   int unset __maybe_unused)
{
	alloc_flag = (caller_flag + 1);
	return 0;
}

static int parse_line_opt(const struct option *opt __maybe_unused,
			  const char *arg, int unset __maybe_unused)
{
	int lines;

	if (!arg)
		return -1;

	lines = strtoul(arg, NULL, 10);

	if (caller_flag > alloc_flag)
		caller_lines = lines;
	else
		alloc_lines = lines;

	return 0;
}
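
/*
 * 'perf kmem record': run 'perf record' system-wide with all six kmem
 * tracepoints enabled, forwarding any extra user arguments.
 */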
static int __cmd_record(int argc, const char **argv)
{
	const char * const record_args[] = {
		"record", "-a", "-R", "-f", "-c", "1",
		"-e", "kmem:kmalloc",
		"-e", "kmem:kmalloc_node",
		"-e", "kmem:kfree",
		"-e", "kmem:kmem_cache_alloc",
		"-e", "kmem:kmem_cache_alloc_node",
		"-e", "kmem:kmem_cache_free",
	};
	unsigned int rec_argc, i, j;
	const char **rec_argv;

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	if (rec_argv == NULL)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	return cmd_record(i, rec_argv, NULL);
}
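
/*
 * Entry point.  Typical invocations:
 *
 *   perf kmem record sleep 1
 *   perf kmem stat --caller -s bytes,hit -l 20
 */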
int cmd_kmem(int argc, const char **argv, const char *prefix __maybe_unused)
{
	const char * const default_sort_order = "frag,hit,bytes";
	const struct option kmem_options[] = {
	OPT_STRING('i', "input", &input_name, "file", "input file name"),
	OPT_CALLBACK_NOOPT(0, "caller", NULL, NULL,
			   "show per-callsite statistics", parse_caller_opt),
	OPT_CALLBACK_NOOPT(0, "alloc", NULL, NULL,
			   "show per-allocation statistics", parse_alloc_opt),
	OPT_CALLBACK('s', "sort", NULL, "key[,key2...]",
		     "sort by keys: ptr, callsite, bytes, hit, pingpong, frag",
		     parse_sort_opt),
	OPT_CALLBACK('l', "line", NULL, "num", "show n lines", parse_line_opt),
	OPT_BOOLEAN(0, "raw-ip", &raw_ip, "show raw ip instead of symbol"),
	OPT_END()
	};
	const char * const kmem_usage[] = {
		"perf kmem [<options>] {record|stat}",
		NULL
	};

	argc = parse_options(argc, argv, kmem_options, kmem_usage, 0);

	if (!argc)
		usage_with_options(kmem_usage, kmem_options);

	symbol__init();

	if (!strncmp(argv[0], "rec", 3)) {
		return __cmd_record(argc, argv);
	} else if (!strcmp(argv[0], "stat")) {
		if (setup_cpunode_map())
			return -1;

		if (list_empty(&caller_sort))
			setup_sorting(&caller_sort, default_sort_order);
		if (list_empty(&alloc_sort))
			setup_sorting(&alloc_sort, default_sort_order);

		return __cmd_kmem();
	} else
		usage_with_options(kmem_usage, kmem_options);

	return 0;
}