builtin-lock.c
#include "builtin.h"
#include "perf.h"

#include "util/util.h"
#include "util/cache.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"

#include "util/parse-options.h"
#include "util/trace-event.h"

#include "util/debug.h"
#include "util/session.h"

#include <sys/types.h>
#include <sys/prctl.h>
#include <semaphore.h>
#include <pthread.h>
#include <math.h>
#include <limits.h>

#include <linux/list.h>
#include <linux/hash.h>
/* based on kernel/lockdep.c */
#define LOCKHASH_BITS		12
#define LOCKHASH_SIZE		(1UL << LOCKHASH_BITS)

static struct list_head lockhash_table[LOCKHASH_SIZE];

#define __lockhashfn(key)	hash_long((unsigned long)key, LOCKHASH_BITS)
#define lockhashentry(key)	(lockhash_table + __lockhashfn((key)))
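/*
 * For illustration, a lookup walks the chained hash like this
 * (the address below is hypothetical, not from a real trace):
 *
 *	void *addr = (void *)0xffff8800deadbeef;
 *	struct list_head *bucket = lockhashentry(addr);
 *	struct lock_stat *st;
 *
 *	list_for_each_entry(st, bucket, hash_entry)
 *		if (st->addr == addr)
 *			... found it ...
 *
 * hash_long() folds the lockdep_map address down to LOCKHASH_BITS bits,
 * and collisions within a bucket are resolved by scanning its list.
 */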
#define LOCK_STATE_UNLOCKED	0	/* initial state */
#define LOCK_STATE_LOCKED	1

struct lock_stat {
	struct list_head	hash_entry;
	struct rb_node		rb;		/* used for sorting */

	/*
	 * FIXME: raw_field_value() returns unsigned long long,
	 * so the address of the lockdep_map has to be dealt with as 64 bit.
	 * Is there a better solution?
	 */
	void			*addr;		/* address of lockdep_map, used as ID */
	char			*name;		/* for strcpy(), we cannot use const */

	int			state;
	u64			prev_event_time; /* timestamp of the previous event */

	unsigned int		nr_acquired;
	unsigned int		nr_acquire;
	unsigned int		nr_contended;
	unsigned int		nr_release;

	/* these times are in nano sec. */
	u64			wait_time_total;
	u64			wait_time_min;
	u64			wait_time_max;
};
/* build a simple key comparison function: returns whether one is bigger than two */
#define SINGLE_KEY(member)						\
	static int lock_stat_key_ ## member(struct lock_stat *one,	\
					    struct lock_stat *two)	\
	{								\
		return one->member > two->member;			\
	}

SINGLE_KEY(nr_acquired)
SINGLE_KEY(nr_contended)
SINGLE_KEY(wait_time_total)
SINGLE_KEY(wait_time_min)
SINGLE_KEY(wait_time_max)
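/*
 * For example, SINGLE_KEY(nr_acquired) expands to:
 *
 *	static int lock_stat_key_nr_acquired(struct lock_stat *one,
 *					     struct lock_stat *two)
 *	{
 *		return one->nr_acquired > two->nr_acquired;
 *	}
 *
 * which insert_to_result() below uses as its "bigger" callback.
 */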
struct lock_key {
	/*
	 * name: the key name specified by the user.
	 * This should be simpler than the raw name of the member,
	 * e.g. nr_acquired -> acquired, wait_time_total -> wait_total
	 */
	const char		*name;
	int			(*key)(struct lock_stat*, struct lock_stat*);
};

static const char		*sort_key = "acquired";

static int			(*compare)(struct lock_stat *, struct lock_stat *);

static struct rb_root		result;	/* place to store sorted data */

#define DEF_KEY_LOCK(name, fn_suffix)	\
	{ #name, lock_stat_key_ ## fn_suffix }
struct lock_key keys[] = {
	DEF_KEY_LOCK(acquired, nr_acquired),
	DEF_KEY_LOCK(contended, nr_contended),
	DEF_KEY_LOCK(wait_total, wait_time_total),
	DEF_KEY_LOCK(wait_min, wait_time_min),
	DEF_KEY_LOCK(wait_max, wait_time_max),

	/* more complicated comparison keys go here */
	{ NULL, NULL }
};
static void select_key(void)
{
	int i;

	for (i = 0; keys[i].name; i++) {
		if (!strcmp(keys[i].name, sort_key)) {
			compare = keys[i].key;
			return;
		}
	}

	die("Unknown compare key:%s\n", sort_key);
}
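/*
 * The sort key comes from the report option parsed further below, e.g.:
 *
 *	perf lock report -k wait_total
 *
 * maps "wait_total" to lock_stat_key_wait_time_total() via keys[].
 */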
static void insert_to_result(struct lock_stat *st,
			     int (*bigger)(struct lock_stat *,
					   struct lock_stat *))
{
	struct rb_node **rb = &result.rb_node;
	struct rb_node *parent = NULL;
	struct lock_stat *p;

	while (*rb) {
		p = container_of(*rb, struct lock_stat, rb);
		parent = *rb;

		if (bigger(st, p))
			rb = &(*rb)->rb_left;
		else
			rb = &(*rb)->rb_right;
	}

	rb_link_node(&st->rb, parent, rb);
	rb_insert_color(&st->rb, &result);
}
/* returns the leftmost element of result, and erases it */
static struct lock_stat *pop_from_result(void)
{
	struct rb_node *node = result.rb_node;

	if (!node)
		return NULL;

	while (node->rb_left)
		node = node->rb_left;

	rb_erase(node, &result);
	return container_of(node, struct lock_stat, rb);
}
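/*
 * Together these two helpers turn the rbtree into a simple priority
 * queue: insert_to_result() links bigger elements to the left, so
 * popping the leftmost node repeatedly yields the statistics in
 * descending order, which is exactly how print_result() drains it:
 *
 *	while ((st = pop_from_result()))
 *		... print one row for st ...
 */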
static struct lock_stat *lock_stat_findnew(void *addr, const char *name)
{
	struct list_head *entry = lockhashentry(addr);
	struct lock_stat *ret, *new;

	list_for_each_entry(ret, entry, hash_entry) {
		if (ret->addr == addr)
			return ret;
	}

	new = zalloc(sizeof(struct lock_stat));
	if (!new)
		goto alloc_failed;

	new->addr = addr;
	new->name = zalloc(strlen(name) + 1);
	if (!new->name)
		goto alloc_failed;
	strcpy(new->name, name);

	/* LOCK_STATE_UNLOCKED == 0 isn't guaranteed forever */
	new->state = LOCK_STATE_UNLOCKED;
	new->wait_time_min = ULLONG_MAX;

	list_add(&new->hash_entry, entry);
	return new;

alloc_failed:
	die("memory allocation failed\n");
}
static char			const *input_name = "perf.data";

static int			profile_cpu = -1;

struct raw_event_sample {
	u32			size;
	char			data[0];
};

struct trace_acquire_event {
	void			*addr;
	const char		*name;
};

struct trace_acquired_event {
	void			*addr;
	const char		*name;
};

struct trace_contended_event {
	void			*addr;
	const char		*name;
};

struct trace_release_event {
	void			*addr;
	const char		*name;
};
struct trace_lock_handler {
	void (*acquire_event)(struct trace_acquire_event *,
			      struct event *,
			      int cpu,
			      u64 timestamp,
			      struct thread *thread);

	void (*acquired_event)(struct trace_acquired_event *,
			       struct event *,
			       int cpu,
			       u64 timestamp,
			       struct thread *thread);

	void (*contended_event)(struct trace_contended_event *,
				struct event *,
				int cpu,
				u64 timestamp,
				struct thread *thread);

	void (*release_event)(struct trace_release_event *,
			      struct event *,
			      int cpu,
			      u64 timestamp,
			      struct thread *thread);
};
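/*
 * The per-lock state machine driven by the report handlers below:
 *
 *	UNLOCKED --acquired-->  LOCKED    (nr_acquired++)
 *	LOCKED   --contended--> LOCKED    (nr_contended++)
 *	LOCKED   --release-->   UNLOCKED  (nr_release++, wait_time_* updated)
 *
 * acquire events and transitions that do not match the current state
 * (e.g. a release while UNLOCKED) only refresh prev_event_time.
 */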
static void
report_lock_acquire_event(struct trace_acquire_event *acquire_event,
			  struct event *__event __used,
			  int cpu __used,
			  u64 timestamp,
			  struct thread *thread __used)
{
	struct lock_stat *st;

	st = lock_stat_findnew(acquire_event->addr, acquire_event->name);

	switch (st->state) {
	case LOCK_STATE_UNLOCKED:
		break;
	case LOCK_STATE_LOCKED:
		break;
	default:
		BUG_ON(1);
		break;
	}

	st->prev_event_time = timestamp;
}
static void
report_lock_acquired_event(struct trace_acquired_event *acquired_event,
			   struct event *__event __used,
			   int cpu __used,
			   u64 timestamp,
			   struct thread *thread __used)
{
	struct lock_stat *st;

	st = lock_stat_findnew(acquired_event->addr, acquired_event->name);

	switch (st->state) {
	case LOCK_STATE_UNLOCKED:
		st->state = LOCK_STATE_LOCKED;
		st->nr_acquired++;
		break;
	case LOCK_STATE_LOCKED:
		break;
	default:
		BUG_ON(1);
		break;
	}

	st->prev_event_time = timestamp;
}

static void
report_lock_contended_event(struct trace_contended_event *contended_event,
			    struct event *__event __used,
			    int cpu __used,
			    u64 timestamp,
			    struct thread *thread __used)
{
	struct lock_stat *st;

	st = lock_stat_findnew(contended_event->addr, contended_event->name);

	switch (st->state) {
	case LOCK_STATE_UNLOCKED:
		break;
	case LOCK_STATE_LOCKED:
		st->nr_contended++;
		break;
	default:
		BUG_ON(1);
		break;
	}

	st->prev_event_time = timestamp;
}
static void
report_lock_release_event(struct trace_release_event *release_event,
			  struct event *__event __used,
			  int cpu __used,
			  u64 timestamp,
			  struct thread *thread __used)
{
	struct lock_stat *st;
	u64 hold_time;

	st = lock_stat_findnew(release_event->addr, release_event->name);

	switch (st->state) {
	case LOCK_STATE_UNLOCKED:
		break;
	case LOCK_STATE_LOCKED:
		st->state = LOCK_STATE_UNLOCKED;

		if (timestamp < st->prev_event_time) {
			/* unfortunately, this can happen... */
			goto end;
		}
		/* check the ordering first: the subtraction is unsigned */
		hold_time = timestamp - st->prev_event_time;

		if (st->wait_time_min > hold_time)
			st->wait_time_min = hold_time;
		if (st->wait_time_max < hold_time)
			st->wait_time_max = hold_time;
		st->wait_time_total += hold_time;

		st->nr_release++;
		break;
	default:
		BUG_ON(1);
		break;
	}

end:
	st->prev_event_time = timestamp;
}
/* lock oriented handlers */
/* TODO: handlers for CPU oriented, thread oriented */
static struct trace_lock_handler report_lock_ops = {
	.acquire_event		= report_lock_acquire_event,
	.acquired_event		= report_lock_acquired_event,
	.contended_event	= report_lock_contended_event,
	.release_event		= report_lock_release_event,
};

static struct trace_lock_handler *trace_handler;
static void
process_lock_acquire_event(void *data,
			   struct event *event __used,
			   int cpu __used,
			   u64 timestamp __used,
			   struct thread *thread __used)
{
	struct trace_acquire_event acquire_event;
	u64 tmp;		/* this is required for casting... */

	tmp = raw_field_value(event, "lockdep_addr", data);
	memcpy(&acquire_event.addr, &tmp, sizeof(void *));
	acquire_event.name = (char *)raw_field_ptr(event, "name", data);

	if (trace_handler->acquire_event)
		trace_handler->acquire_event(&acquire_event, event, cpu, timestamp, thread);
}

static void
process_lock_acquired_event(void *data,
			    struct event *event __used,
			    int cpu __used,
			    u64 timestamp __used,
			    struct thread *thread __used)
{
	struct trace_acquired_event acquired_event;
	u64 tmp;		/* this is required for casting... */

	tmp = raw_field_value(event, "lockdep_addr", data);
	memcpy(&acquired_event.addr, &tmp, sizeof(void *));
	acquired_event.name = (char *)raw_field_ptr(event, "name", data);

	/* the guard must test the handler that is actually called */
	if (trace_handler->acquired_event)
		trace_handler->acquired_event(&acquired_event, event, cpu, timestamp, thread);
}

static void
process_lock_contended_event(void *data,
			     struct event *event __used,
			     int cpu __used,
			     u64 timestamp __used,
			     struct thread *thread __used)
{
	struct trace_contended_event contended_event;
	u64 tmp;		/* this is required for casting... */

	tmp = raw_field_value(event, "lockdep_addr", data);
	memcpy(&contended_event.addr, &tmp, sizeof(void *));
	contended_event.name = (char *)raw_field_ptr(event, "name", data);

	if (trace_handler->contended_event)
		trace_handler->contended_event(&contended_event, event, cpu, timestamp, thread);
}

static void
process_lock_release_event(void *data,
			   struct event *event __used,
			   int cpu __used,
			   u64 timestamp __used,
			   struct thread *thread __used)
{
	struct trace_release_event release_event;
	u64 tmp;		/* this is required for casting... */

	tmp = raw_field_value(event, "lockdep_addr", data);
	memcpy(&release_event.addr, &tmp, sizeof(void *));
	release_event.name = (char *)raw_field_ptr(event, "name", data);

	if (trace_handler->release_event)
		trace_handler->release_event(&release_event, event, cpu, timestamp, thread);
}
static void
process_raw_event(void *data, int cpu,
		  u64 timestamp, struct thread *thread)
{
	struct event *event;
	int type;

	type = trace_parse_common_type(data);
	event = trace_find_event(type);

	if (!strcmp(event->name, "lock_acquire"))
		process_lock_acquire_event(data, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "lock_acquired"))
		process_lock_acquired_event(data, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "lock_contended"))
		process_lock_contended_event(data, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "lock_release"))
		process_lock_release_event(data, event, cpu, timestamp, thread);
}
struct raw_event_queue {
	u64			timestamp;
	int			cpu;
	void			*data;
	struct thread		*thread;
	struct list_head	list;
};

static LIST_HEAD(raw_event_head);

#define FLUSH_PERIOD	(5 * NSEC_PER_SEC)

static u64 flush_limit = ULLONG_MAX;
static u64 last_flush = 0;
static struct raw_event_queue *last_inserted;

static void flush_raw_event_queue(u64 limit)
{
	struct raw_event_queue *tmp, *iter;

	list_for_each_entry_safe(iter, tmp, &raw_event_head, list) {
		if (iter->timestamp > limit)
			return;

		if (iter == last_inserted)
			last_inserted = NULL;

		process_raw_event(iter->data, iter->cpu, iter->timestamp,
				  iter->thread);

		last_flush = iter->timestamp;
		list_del(&iter->list);
		free(iter->data);
		free(iter);
	}
}
static void __queue_raw_event_end(struct raw_event_queue *new)
{
	struct raw_event_queue *iter;

	list_for_each_entry_reverse(iter, &raw_event_head, list) {
		if (iter->timestamp < new->timestamp) {
			list_add(&new->list, &iter->list);
			return;
		}
	}

	list_add(&new->list, &raw_event_head);
}

static void __queue_raw_event_before(struct raw_event_queue *new,
				     struct raw_event_queue *iter)
{
	list_for_each_entry_continue_reverse(iter, &raw_event_head, list) {
		if (iter->timestamp < new->timestamp) {
			list_add(&new->list, &iter->list);
			return;
		}
	}

	list_add(&new->list, &raw_event_head);
}

static void __queue_raw_event_after(struct raw_event_queue *new,
				    struct raw_event_queue *iter)
{
	list_for_each_entry_continue(iter, &raw_event_head, list) {
		if (iter->timestamp > new->timestamp) {
			list_add_tail(&new->list, &iter->list);
			return;
		}
	}

	list_add_tail(&new->list, &raw_event_head);
}
/* The queue is ordered by time */
static void __queue_raw_event(struct raw_event_queue *new)
{
	if (!last_inserted) {
		__queue_raw_event_end(new);
		return;
	}

	/*
	 * Most of the time the current event has a timestamp
	 * very close to the last event inserted, unless we just switched
	 * to another event buffer. A sort based on a list and on the
	 * last inserted event, which is close to the current one, is
	 * probably more efficient than an rbtree based sort.
	 */
	if (last_inserted->timestamp >= new->timestamp)
		__queue_raw_event_before(new, last_inserted);
	else
		__queue_raw_event_after(new, last_inserted);
}
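/*
 * An illustrative scenario (the timestamps are made up): with queued
 * events at t = 100, 200, 300 and last_inserted at t = 200, a new event
 * at t = 210 only walks forward from t = 200 until it meets t = 300,
 * instead of searching from one end of the list. Only a buffer switch,
 * which jumps far away in time, pays for a longer walk.
 */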
static void queue_raw_event(void *data, int raw_size, int cpu,
			    u64 timestamp, struct thread *thread)
{
	struct raw_event_queue *new;

	if (flush_limit == ULLONG_MAX)
		flush_limit = timestamp + FLUSH_PERIOD;

	if (timestamp < last_flush) {
		printf("Warning: Timestamp below last timeslice flush\n");
		return;
	}

	new = malloc(sizeof(*new));
	if (!new)
		die("Not enough memory\n");

	new->timestamp = timestamp;
	new->cpu = cpu;
	new->thread = thread;

	new->data = malloc(raw_size);
	if (!new->data)
		die("Not enough memory\n");

	memcpy(new->data, data, raw_size);

	__queue_raw_event(new);
	last_inserted = new;

	/*
	 * We want to have a slice of events covering 2 * FLUSH_PERIOD.
	 * If FLUSH_PERIOD is big enough, it ensures that all events that
	 * occurred in the first half of the timeslice have been buffered
	 * and that none remain (we need that because of the weakly ordered
	 * event recording we have). Then once we reach the 2 * FLUSH_PERIOD
	 * timeslice, we flush the first half to be gentle with the memory
	 * (the second half can still get new events in the middle, so wait
	 * another period to flush it).
	 */
	if (new->timestamp > flush_limit &&
	    new->timestamp - flush_limit > FLUSH_PERIOD) {
		flush_limit += FLUSH_PERIOD;
		flush_raw_event_queue(flush_limit);
	}
}
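/*
 * A worked example of the sliding window, with hypothetical timestamps
 * in seconds and FLUSH_PERIOD = 5s: the first event at t = 1 sets
 * flush_limit = 6. Events keep queueing until one arrives past
 * t = 11 (flush_limit + FLUSH_PERIOD); flush_limit then becomes 11 and
 * everything with a timestamp <= 11 is processed and freed, while
 * younger events stay buffered in case late, out-of-order samples for
 * that half of the window are still coming in.
 */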
static int process_sample_event(event_t *event, struct perf_session *session)
{
	struct thread *thread;
	struct sample_data data;

	bzero(&data, sizeof(struct sample_data));
	event__parse_sample(event, session->sample_type, &data);
	thread = perf_session__findnew(session, data.pid);

	if (thread == NULL) {
		pr_debug("problem processing %d event, skipping it.\n",
			 event->header.type);
		return -1;
	}

	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);

	if (profile_cpu != -1 && profile_cpu != (int) data.cpu)
		return 0;

	queue_raw_event(data.raw_data, data.raw_size, data.cpu, data.time, thread);

	return 0;
}
/* TODO: various ways to print, coloring, nano or milli sec */
static void print_result(void)
{
	struct lock_stat *st;
	char cut_name[20];

	printf("%18s ", "ID");
	printf("%20s ", "Name");
	printf("%10s ", "acquired");
	printf("%10s ", "contended");

	printf("%15s ", "total wait (ns)");
	printf("%15s ", "max wait (ns)");
	printf("%15s ", "min wait (ns)");

	printf("\n\n");

	while ((st = pop_from_result())) {
		bzero(cut_name, 20);

		printf("%p ", st->addr);

		if (strlen(st->name) < 16) {
			/* output raw name */
			printf("%20s ", st->name);
		} else {
			strncpy(cut_name, st->name, 16);
			cut_name[16] = '.';
			cut_name[17] = '.';
			cut_name[18] = '.';
			cut_name[19] = '\0';
			/* truncate the name so the columns stay aligned */
			printf("%20s ", cut_name);
		}

		printf("%10u ", st->nr_acquired);
		printf("%10u ", st->nr_contended);

		printf("%15llu ", st->wait_time_total);
		printf("%15llu ", st->wait_time_max);
		printf("%15llu ", st->wait_time_min == ULLONG_MAX ?
		       0 : st->wait_time_min);

		printf("\n");
	}
}
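/*
 * The resulting table looks roughly like this (the values below are
 * illustrative, not from a real run):
 *
 *	        ID                 Name   acquired  contended  total wait (ns) ...
 *	0xffff8800b8089058  &inode->i_mutex      152          3            24711 ...
 *
 * Names of 16 or more characters are shown truncated with a
 * trailing "...".
 */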
static void dump_map(void)
{
	unsigned int i;
	struct lock_stat *st;

	for (i = 0; i < LOCKHASH_SIZE; i++) {
		list_for_each_entry(st, &lockhash_table[i], hash_entry) {
			printf("%p: %s\n", st->addr, st->name);
		}
	}
}
static struct perf_event_ops eops = {
	.sample			= process_sample_event,
	.comm			= event__process_comm,
};

static struct perf_session *session;

static int read_events(void)
{
	session = perf_session__new(input_name, O_RDONLY, 0);
	if (!session)
		die("Initializing perf session failed\n");

	return perf_session__process_events(session, &eops);
}
static void sort_result(void)
{
	unsigned int i;
	struct lock_stat *st;

	for (i = 0; i < LOCKHASH_SIZE; i++) {
		list_for_each_entry(st, &lockhash_table[i], hash_entry) {
			insert_to_result(st, compare);
		}
	}
}

static void __cmd_report(void)
{
	setup_pager();
	select_key();
	read_events();
	flush_raw_event_queue(ULLONG_MAX);
	sort_result();
	print_result();
}
static const char * const report_usage[] = {
	"perf lock report [<options>]",
	NULL
};

static const struct option report_options[] = {
	OPT_STRING('k', "key", &sort_key, "acquired",
		   "key for sorting"),
	/* TODO: type */
	OPT_END()
};

static const char * const lock_usage[] = {
	"perf lock [<options>] {record|trace|report}",
	NULL
};

static const struct option lock_options[] = {
	OPT_STRING('i', "input", &input_name, "file", "input file name"),
	OPT_BOOLEAN('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"),
	OPT_END()
};
static const char *record_args[] = {
	"record",
	"-a",
	"-R",
	"-f",
	"-m", "1024",
	"-c", "1",
	"-e", "lock:lock_acquire:r",
	"-e", "lock:lock_acquired:r",
	"-e", "lock:lock_contended:r",
	"-e", "lock:lock_release:r",
};
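/*
 * __cmd_record() below therefore forwards to the equivalent of:
 *
 *	perf record -a -R -f -m 1024 -c 1 \
 *		-e lock:lock_acquire:r -e lock:lock_acquired:r \
 *		-e lock:lock_contended:r -e lock:lock_release:r \
 *		[user-supplied args]
 *
 * i.e. system-wide raw samples of the four lock tracepoints, one
 * sample per event.
 */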
static int __cmd_record(int argc, const char **argv)
{
	unsigned int rec_argc, i, j;
	const char **rec_argv;

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));
	if (!rec_argv)
		die("Not enough memory\n");

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	BUG_ON(i != rec_argc);

	return cmd_record(i, rec_argv, NULL);
}
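/*
 * End-to-end usage, for reference (the subcommand names match the
 * dispatch in cmd_lock() below; "sleep 5" is just an example workload):
 *
 *	perf lock record sleep 5	# capture lock events system-wide
 *	perf lock report -k contended	# aggregate and sort the statistics
 *	perf lock map			# dump the address -> name map
 */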
int cmd_lock(int argc, const char **argv, const char *prefix __used)
{
	unsigned int i;

	symbol__init();
	for (i = 0; i < LOCKHASH_SIZE; i++)
		INIT_LIST_HEAD(lockhash_table + i);

	argc = parse_options(argc, argv, lock_options, lock_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc)
		usage_with_options(lock_usage, lock_options);

	if (!strncmp(argv[0], "rec", 3)) {
		return __cmd_record(argc, argv);
	} else if (!strncmp(argv[0], "report", 6)) {
		trace_handler = &report_lock_ops;
		if (argc) {
			argc = parse_options(argc, argv,
					     report_options, report_usage, 0);
			if (argc)
				usage_with_options(report_usage, report_options);
		}
		__cmd_report();
	} else if (!strcmp(argv[0], "trace")) {
		/* Aliased to 'perf trace' */
		return cmd_trace(argc, argv, prefix);
	} else if (!strcmp(argv[0], "map")) {
		/* recycling report_lock_ops */
		trace_handler = &report_lock_ops;
		setup_pager();
		read_events();
		dump_map();
	} else {
		usage_with_options(lock_usage, lock_options);
	}

	return 0;
}