builtin-lock.c
#include "builtin.h"
#include "perf.h"

#include "util/evlist.h"
#include "util/evsel.h"
#include "util/util.h"
#include "util/cache.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"

#include "util/parse-options.h"
#include "util/trace-event.h"

#include "util/debug.h"
#include "util/session.h"
#include "util/tool.h"

#include <sys/types.h>
#include <sys/prctl.h>
#include <semaphore.h>
#include <pthread.h>
#include <math.h>
#include <limits.h>

#include <linux/list.h>
#include <linux/hash.h>

static struct perf_session *session;

/* based on kernel/lockdep.c */
#define LOCKHASH_BITS		12
#define LOCKHASH_SIZE		(1UL << LOCKHASH_BITS)

static struct list_head lockhash_table[LOCKHASH_SIZE];

#define __lockhashfn(key)	hash_long((unsigned long)key, LOCKHASH_BITS)
#define lockhashentry(key)	(lockhash_table + __lockhashfn((key)))
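
/*
 * One lock_stat is kept per lock instance; instances are identified by
 * the address of their lockdep_map and chained into lockhash_table.
 */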
struct lock_stat {
	struct list_head	hash_entry;
	struct rb_node		rb;		/* used for sorting */

	/*
	 * FIXME: perf_evsel__intval() returns u64,
	 * so the address of a lockdep_map should be treated as 64-bit.
	 * Is there a better solution?
	 */
	void			*addr;		/* address of lockdep_map, used as ID */
	char			*name;		/* for strcpy(), we cannot use const */

	unsigned int		nr_acquire;
	unsigned int		nr_acquired;
	unsigned int		nr_contended;
	unsigned int		nr_release;

	unsigned int		nr_readlock;
	unsigned int		nr_trylock;

	/* these times are in nano sec. */
	u64			avg_wait_time;
	u64			wait_time_total;
	u64			wait_time_min;
	u64			wait_time_max;

	int			discard; /* flag of blacklist */
};

/*
 * States of lock_seq_stat
 *
 * UNINITIALIZED is required for detecting the first acquire event.
 * By the nature of lock events, there is no guarantee
 * that the first event for a lock is acquire;
 * it can be acquired, contended or release.
 */
#define SEQ_STATE_UNINITIALIZED	0 /* initial state */
#define SEQ_STATE_RELEASED	1
#define SEQ_STATE_ACQUIRING	2
#define SEQ_STATE_ACQUIRED	3
#define SEQ_STATE_READ_ACQUIRED	4
#define SEQ_STATE_CONTENDED	5

/*
 * MAX_LOCK_DEPTH
 * Imported from include/linux/sched.h.
 * Should this be synchronized?
 */
#define MAX_LOCK_DEPTH 48

/*
 * struct lock_seq_stat:
 * Tracks the state of one lock sequence:
 * 1) acquire -> acquired -> release
 * 2) acquire -> contended -> acquired -> release
 * 3) acquire (with read or try) -> release
 * 4) Are there other patterns?
 */
struct lock_seq_stat {
	struct list_head	list;
	int			state;
	u64			prev_event_time;
	void			*addr;

	int			read_count;
};

struct thread_stat {
	struct rb_node		rb;

	u32			tid;
	struct list_head	seq_list;
};

static struct rb_root		thread_stats;
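
/* thread_stats is an rb-tree of per-thread stats, keyed by tid */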
static struct thread_stat *thread_stat_find(u32 tid)
{
	struct rb_node *node;
	struct thread_stat *st;

	node = thread_stats.rb_node;
	while (node) {
		st = container_of(node, struct thread_stat, rb);
		if (st->tid == tid)
			return st;
		else if (tid < st->tid)
			node = node->rb_left;
		else
			node = node->rb_right;
	}

	return NULL;
}

static void thread_stat_insert(struct thread_stat *new)
{
	struct rb_node **rb = &thread_stats.rb_node;
	struct rb_node *parent = NULL;
	struct thread_stat *p;

	while (*rb) {
		p = container_of(*rb, struct thread_stat, rb);
		parent = *rb;

		if (new->tid < p->tid)
			rb = &(*rb)->rb_left;
		else if (new->tid > p->tid)
			rb = &(*rb)->rb_right;
		else
			BUG_ON("inserting invalid thread_stat\n");
	}

	rb_link_node(&new->rb, parent, rb);
	rb_insert_color(&new->rb, &thread_stats);
}

static struct thread_stat *thread_stat_findnew_after_first(u32 tid)
{
	struct thread_stat *st;

	st = thread_stat_find(tid);
	if (st)
		return st;

	st = zalloc(sizeof(struct thread_stat));
	if (!st) {
		pr_err("memory allocation failed\n");
		return NULL;
	}

	st->tid = tid;
	INIT_LIST_HEAD(&st->seq_list);

	thread_stat_insert(st);

	return st;
}
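
/*
 * thread_stat_findnew starts out pointing at the "first" variant below,
 * which links the very first node directly into the empty rb-tree and
 * then redirects the pointer to the normal find-or-insert variant above,
 * so the empty-tree special case is paid for only once.
 */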
static struct thread_stat *thread_stat_findnew_first(u32 tid);
static struct thread_stat *(*thread_stat_findnew)(u32 tid) =
	thread_stat_findnew_first;

static struct thread_stat *thread_stat_findnew_first(u32 tid)
{
	struct thread_stat *st;

	st = zalloc(sizeof(struct thread_stat));
	if (!st) {
		pr_err("memory allocation failed\n");
		return NULL;
	}
	st->tid = tid;
	INIT_LIST_HEAD(&st->seq_list);

	rb_link_node(&st->rb, NULL, &thread_stats.rb_node);
	rb_insert_color(&st->rb, &thread_stats);

	thread_stat_findnew = thread_stat_findnew_after_first;
	return st;
}

/* build simple key comparison functions: true if "one" is bigger than "two" */
#define SINGLE_KEY(member)						\
	static int lock_stat_key_ ## member(struct lock_stat *one,	\
					    struct lock_stat *two)	\
	{								\
		return one->member > two->member;			\
	}

SINGLE_KEY(nr_acquired)
SINGLE_KEY(nr_contended)
SINGLE_KEY(avg_wait_time)
SINGLE_KEY(wait_time_total)
SINGLE_KEY(wait_time_max)

static int lock_stat_key_wait_time_min(struct lock_stat *one,
				       struct lock_stat *two)
{
	u64 s1 = one->wait_time_min;
	u64 s2 = two->wait_time_min;

	/* ULLONG_MAX means the lock was never contended; sort it as zero */
	if (s1 == ULLONG_MAX)
		s1 = 0;
	if (s2 == ULLONG_MAX)
		s2 = 0;
	return s1 > s2;
}

struct lock_key {
	/*
	 * name: the key name specified by the user;
	 * this should be simpler than the raw name of the member,
	 * e.g. nr_acquired -> acquired, wait_time_total -> wait_total
	 */
	const char	*name;
	int		(*key)(struct lock_stat*, struct lock_stat*);
};

static const char	*sort_key = "acquired";

static int		(*compare)(struct lock_stat *, struct lock_stat *);

static struct rb_root	result;	/* place to store sorted data */

#define DEF_KEY_LOCK(name, fn_suffix)	\
	{ #name, lock_stat_key_ ## fn_suffix }
struct lock_key keys[] = {
	DEF_KEY_LOCK(acquired, nr_acquired),
	DEF_KEY_LOCK(contended, nr_contended),
	DEF_KEY_LOCK(avg_wait, avg_wait_time),
	DEF_KEY_LOCK(wait_total, wait_time_total),
	DEF_KEY_LOCK(wait_min, wait_time_min),
	DEF_KEY_LOCK(wait_max, wait_time_max),

	/* more complicated comparisons should go here */
	{ NULL, NULL }
};

static int select_key(void)
{
	int i;

	for (i = 0; keys[i].name; i++) {
		if (!strcmp(keys[i].name, sort_key)) {
			compare = keys[i].key;
			return 0;
		}
	}

	pr_err("Unknown compare key: %s\n", sort_key);

	return -1;
}
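
/*
 * Bigger elements are linked to the left, so pop_from_result() (which
 * takes the leftmost node) yields lock_stats in descending order of the
 * selected sort key.
 */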
static void insert_to_result(struct lock_stat *st,
			     int (*bigger)(struct lock_stat *, struct lock_stat *))
{
	struct rb_node **rb = &result.rb_node;
	struct rb_node *parent = NULL;
	struct lock_stat *p;

	while (*rb) {
		p = container_of(*rb, struct lock_stat, rb);
		parent = *rb;

		if (bigger(st, p))
			rb = &(*rb)->rb_left;
		else
			rb = &(*rb)->rb_right;
	}

	rb_link_node(&st->rb, parent, rb);
	rb_insert_color(&st->rb, &result);
}

/* returns the leftmost element of result, and erases it */
static struct lock_stat *pop_from_result(void)
{
	struct rb_node *node = result.rb_node;

	if (!node)
		return NULL;

	while (node->rb_left)
		node = node->rb_left;

	rb_erase(node, &result);
	return container_of(node, struct lock_stat, rb);
}

static struct lock_stat *lock_stat_findnew(void *addr, const char *name)
{
	struct list_head *entry = lockhashentry(addr);
	struct lock_stat *ret, *new;

	list_for_each_entry(ret, entry, hash_entry) {
		if (ret->addr == addr)
			return ret;
	}

	new = zalloc(sizeof(struct lock_stat));
	if (!new)
		goto alloc_failed;

	new->addr = addr;
	new->name = zalloc(strlen(name) + 1);
	if (!new->name) {
		free(new);
		goto alloc_failed;
	}
	strcpy(new->name, name);

	new->wait_time_min = ULLONG_MAX;

	list_add(&new->hash_entry, entry);
	return new;

alloc_failed:
	pr_err("memory allocation failed\n");
	return NULL;
}

struct trace_lock_handler {
	int (*acquire_event)(struct perf_evsel *evsel,
			     struct perf_sample *sample);

	int (*acquired_event)(struct perf_evsel *evsel,
			      struct perf_sample *sample);

	int (*contended_event)(struct perf_evsel *evsel,
			       struct perf_sample *sample);

	int (*release_event)(struct perf_evsel *evsel,
			     struct perf_sample *sample);
};
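
/*
 * Find or create the lock_seq_stat for the given lock address on thread
 * ts; a freshly created sequence starts in SEQ_STATE_UNINITIALIZED.
 */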
static struct lock_seq_stat *get_seq(struct thread_stat *ts, void *addr)
{
	struct lock_seq_stat *seq;

	list_for_each_entry(seq, &ts->seq_list, list) {
		if (seq->addr == addr)
			return seq;
	}

	seq = zalloc(sizeof(struct lock_seq_stat));
	if (!seq) {
		pr_err("memory allocation failed\n");
		return NULL;
	}
	seq->state = SEQ_STATE_UNINITIALIZED;
	seq->addr = addr;

	list_add(&seq->list, &ts->seq_list);
	return seq;
}

enum broken_state {
	BROKEN_ACQUIRE,
	BROKEN_ACQUIRED,
	BROKEN_CONTENDED,
	BROKEN_RELEASE,
	BROKEN_MAX,
};

static int bad_hist[BROKEN_MAX];

enum acquire_flags {
	TRY_LOCK = 1,
	READ_LOCK = 2,
};
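
/*
 * lock:lock_acquire handler: advances the per-thread sequence state.
 * A plain acquire moves to ACQUIRING; read/try acquires go straight to
 * READ_ACQUIRED (read locks may nest, tracked via read_count).  Any
 * event that contradicts the state machine marks the lock as discarded
 * and counts it in bad_hist.
 */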
static int report_lock_acquire_event(struct perf_evsel *evsel,
				     struct perf_sample *sample)
{
	void *addr;
	struct lock_stat *ls;
	struct thread_stat *ts;
	struct lock_seq_stat *seq;
	const char *name = perf_evsel__strval(evsel, sample, "name");
	u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr");
	int flag = perf_evsel__intval(evsel, sample, "flag");

	memcpy(&addr, &tmp, sizeof(void *));

	ls = lock_stat_findnew(addr, name);
	if (!ls)
		return -ENOMEM;
	if (ls->discard)
		return 0;

	ts = thread_stat_findnew(sample->tid);
	if (!ts)
		return -ENOMEM;

	seq = get_seq(ts, addr);
	if (!seq)
		return -ENOMEM;

	switch (seq->state) {
	case SEQ_STATE_UNINITIALIZED:
	case SEQ_STATE_RELEASED:
		if (!flag) {
			seq->state = SEQ_STATE_ACQUIRING;
		} else {
			if (flag & TRY_LOCK)
				ls->nr_trylock++;
			if (flag & READ_LOCK)
				ls->nr_readlock++;
			seq->state = SEQ_STATE_READ_ACQUIRED;
			seq->read_count = 1;
			ls->nr_acquired++;
		}
		break;
	case SEQ_STATE_READ_ACQUIRED:
		if (flag & READ_LOCK) {
			seq->read_count++;
			ls->nr_acquired++;
			goto end;
		} else {
			goto broken;
		}
		break;
	case SEQ_STATE_ACQUIRED:
	case SEQ_STATE_ACQUIRING:
	case SEQ_STATE_CONTENDED:
broken:
		/* broken lock sequence, discard it */
		ls->discard = 1;
		bad_hist[BROKEN_ACQUIRE]++;
		list_del(&seq->list);
		free(seq);
		goto end;
	default:
		BUG_ON("Unknown state of lock sequence found!\n");
		break;
	}

	ls->nr_acquire++;
	seq->prev_event_time = sample->time;
end:
	return 0;
}

static int report_lock_acquired_event(struct perf_evsel *evsel,
				      struct perf_sample *sample)
{
	void *addr;
	struct lock_stat *ls;
	struct thread_stat *ts;
	struct lock_seq_stat *seq;
	u64 contended_term;
	const char *name = perf_evsel__strval(evsel, sample, "name");
	u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr");

	memcpy(&addr, &tmp, sizeof(void *));

	ls = lock_stat_findnew(addr, name);
	if (!ls)
		return -ENOMEM;
	if (ls->discard)
		return 0;

	ts = thread_stat_findnew(sample->tid);
	if (!ts)
		return -ENOMEM;

	seq = get_seq(ts, addr);
	if (!seq)
		return -ENOMEM;

	switch (seq->state) {
	case SEQ_STATE_UNINITIALIZED:
		/* orphan event, do nothing */
		return 0;
	case SEQ_STATE_ACQUIRING:
		break;
	case SEQ_STATE_CONTENDED:
		/* the gap between the contended and acquired events is the wait time */
		contended_term = sample->time - seq->prev_event_time;
		ls->wait_time_total += contended_term;
		if (contended_term < ls->wait_time_min)
			ls->wait_time_min = contended_term;
		if (ls->wait_time_max < contended_term)
			ls->wait_time_max = contended_term;
		break;
	case SEQ_STATE_RELEASED:
	case SEQ_STATE_ACQUIRED:
	case SEQ_STATE_READ_ACQUIRED:
		/* broken lock sequence, discard it */
		ls->discard = 1;
		bad_hist[BROKEN_ACQUIRED]++;
		list_del(&seq->list);
		free(seq);
		goto end;
	default:
		BUG_ON("Unknown state of lock sequence found!\n");
		break;
	}

	seq->state = SEQ_STATE_ACQUIRED;
	ls->nr_acquired++;
	ls->avg_wait_time = ls->nr_contended ? ls->wait_time_total/ls->nr_contended : 0;
	seq->prev_event_time = sample->time;
end:
	return 0;
}

static int report_lock_contended_event(struct perf_evsel *evsel,
				       struct perf_sample *sample)
{
	void *addr;
	struct lock_stat *ls;
	struct thread_stat *ts;
	struct lock_seq_stat *seq;
	const char *name = perf_evsel__strval(evsel, sample, "name");
	u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr");

	memcpy(&addr, &tmp, sizeof(void *));

	ls = lock_stat_findnew(addr, name);
	if (!ls)
		return -ENOMEM;
	if (ls->discard)
		return 0;

	ts = thread_stat_findnew(sample->tid);
	if (!ts)
		return -ENOMEM;

	seq = get_seq(ts, addr);
	if (!seq)
		return -ENOMEM;

	switch (seq->state) {
	case SEQ_STATE_UNINITIALIZED:
		/* orphan event, do nothing */
		return 0;
	case SEQ_STATE_ACQUIRING:
		break;
	case SEQ_STATE_RELEASED:
	case SEQ_STATE_ACQUIRED:
	case SEQ_STATE_READ_ACQUIRED:
	case SEQ_STATE_CONTENDED:
		/* broken lock sequence, discard it */
		ls->discard = 1;
		bad_hist[BROKEN_CONTENDED]++;
		list_del(&seq->list);
		free(seq);
		goto end;
	default:
		BUG_ON("Unknown state of lock sequence found!\n");
		break;
	}

	seq->state = SEQ_STATE_CONTENDED;
	ls->nr_contended++;
	ls->avg_wait_time = ls->wait_time_total/ls->nr_contended;
	seq->prev_event_time = sample->time;
end:
	return 0;
}

static int report_lock_release_event(struct perf_evsel *evsel,
				     struct perf_sample *sample)
{
	void *addr;
	struct lock_stat *ls;
	struct thread_stat *ts;
	struct lock_seq_stat *seq;
	const char *name = perf_evsel__strval(evsel, sample, "name");
	u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr");

	memcpy(&addr, &tmp, sizeof(void *));

	ls = lock_stat_findnew(addr, name);
	if (!ls)
		return -ENOMEM;
	if (ls->discard)
		return 0;

	ts = thread_stat_findnew(sample->tid);
	if (!ts)
		return -ENOMEM;

	seq = get_seq(ts, addr);
	if (!seq)
		return -ENOMEM;

	switch (seq->state) {
	case SEQ_STATE_UNINITIALIZED:
		goto end;
	case SEQ_STATE_ACQUIRED:
		break;
	case SEQ_STATE_READ_ACQUIRED:
		seq->read_count--;
		BUG_ON(seq->read_count < 0);
		if (!seq->read_count) {
			ls->nr_release++;
			goto end;
		}
		break;
	case SEQ_STATE_ACQUIRING:
	case SEQ_STATE_CONTENDED:
	case SEQ_STATE_RELEASED:
		/* broken lock sequence, discard it */
		ls->discard = 1;
		bad_hist[BROKEN_RELEASE]++;
		goto free_seq;
	default:
		BUG_ON("Unknown state of lock sequence found!\n");
		break;
	}

	ls->nr_release++;
free_seq:
	list_del(&seq->list);
	free(seq);
end:
	return 0;
}

/* lock oriented handlers */
/* TODO: handlers for CPU oriented, thread oriented */
static struct trace_lock_handler report_lock_ops  = {
	.acquire_event		= report_lock_acquire_event,
	.acquired_event		= report_lock_acquired_event,
	.contended_event	= report_lock_contended_event,
	.release_event		= report_lock_release_event,
};

static struct trace_lock_handler *trace_handler;

static int perf_evsel__process_lock_acquire(struct perf_evsel *evsel,
					    struct perf_sample *sample)
{
	if (trace_handler->acquire_event)
		return trace_handler->acquire_event(evsel, sample);
	return 0;
}

static int perf_evsel__process_lock_acquired(struct perf_evsel *evsel,
					     struct perf_sample *sample)
{
	if (trace_handler->acquired_event)
		return trace_handler->acquired_event(evsel, sample);
	return 0;
}

static int perf_evsel__process_lock_contended(struct perf_evsel *evsel,
					      struct perf_sample *sample)
{
	if (trace_handler->contended_event)
		return trace_handler->contended_event(evsel, sample);
	return 0;
}

static int perf_evsel__process_lock_release(struct perf_evsel *evsel,
					    struct perf_sample *sample)
{
	if (trace_handler->release_event)
		return trace_handler->release_event(evsel, sample);
	return 0;
}

static void print_bad_events(int bad, int total)
{
	/* Output for debugging; this has to be removed */
	int i;
	const char *name[4] =
		{ "acquire", "acquired", "contended", "release" };

	pr_info("\n=== output for debug ===\n\n");
	pr_info("bad: %d, total: %d\n", bad, total);
	/* guard against division by zero when no events were processed */
	pr_info("bad rate: %.2f %%\n",
		total ? (double)bad / (double)total * 100 : 0.0);
	pr_info("histogram of events that caused bad sequences\n");
	for (i = 0; i < BROKEN_MAX; i++)
		pr_info(" %10s: %d\n", name[i], bad_hist[i]);
}

/* TODO: various ways to print, coloring, nano or milli sec */
static void print_result(void)
{
	struct lock_stat *st;
	char cut_name[20];
	int bad, total;

	pr_info("%20s ", "Name");
	pr_info("%10s ", "acquired");
	pr_info("%10s ", "contended");

	pr_info("%15s ", "avg wait (ns)");
	pr_info("%15s ", "total wait (ns)");
	pr_info("%15s ", "max wait (ns)");
	pr_info("%15s ", "min wait (ns)");

	pr_info("\n\n");

	bad = total = 0;
	while ((st = pop_from_result())) {
		total++;
		if (st->discard) {
			bad++;
			continue;
		}
		bzero(cut_name, 20);

		if (strlen(st->name) < 16) {
			/* output raw name */
			pr_info("%20s ", st->name);
		} else {
			strncpy(cut_name, st->name, 16);
			cut_name[16] = '.';
			cut_name[17] = '.';
			cut_name[18] = '.';
			cut_name[19] = '\0';
			/* cut off the name to preserve output alignment */
			pr_info("%20s ", cut_name);
		}
		pr_info("%10u ", st->nr_acquired);
		pr_info("%10u ", st->nr_contended);

		pr_info("%15" PRIu64 " ", st->avg_wait_time);
		pr_info("%15" PRIu64 " ", st->wait_time_total);
		pr_info("%15" PRIu64 " ", st->wait_time_max);
		pr_info("%15" PRIu64 " ", st->wait_time_min == ULLONG_MAX ?
			0 : st->wait_time_min);
		pr_info("\n");
	}

	print_bad_events(bad, total);
}

static bool info_threads, info_map;

static void dump_threads(void)
{
	struct thread_stat *st;
	struct rb_node *node;
	struct thread *t;

	pr_info("%10s: comm\n", "Thread ID");

	node = rb_first(&thread_stats);
	while (node) {
		st = container_of(node, struct thread_stat, rb);
		t = perf_session__findnew(session, st->tid);
		pr_info("%10d: %s\n", st->tid, t->comm);
		node = rb_next(node);
	}
}

static void dump_map(void)
{
	unsigned int i;
	struct lock_stat *st;

	pr_info("Address of instance: name of class\n");
	for (i = 0; i < LOCKHASH_SIZE; i++) {
		list_for_each_entry(st, &lockhash_table[i], hash_entry) {
			pr_info(" %p: %s\n", st->addr, st->name);
		}
	}
}

static int dump_info(void)
{
	int rc = 0;

	if (info_threads)
		dump_threads();
	else if (info_map)
		dump_map();
	else {
		rc = -1;
		pr_err("Unknown type of information\n");
	}

	return rc;
}

typedef int (*tracepoint_handler)(struct perf_evsel *evsel,
				  struct perf_sample *sample);
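
/*
 * Per-sample entry point: resolve the thread, then dispatch to the
 * tracepoint handler installed on the evsel by
 * perf_session__set_tracepoints_handlers().
 */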
static int process_sample_event(struct perf_tool *tool __maybe_unused,
				union perf_event *event,
				struct perf_sample *sample,
				struct perf_evsel *evsel,
				struct machine *machine)
{
	struct thread *thread = machine__findnew_thread(machine, sample->pid,
							sample->tid);

	if (thread == NULL) {
		pr_debug("problem processing %d event, skipping it.\n",
			 event->header.type);
		return -1;
	}

	if (evsel->handler.func != NULL) {
		tracepoint_handler f = evsel->handler.func;
		return f(evsel, sample);
	}

	return 0;
}

static void sort_result(void)
{
	unsigned int i;
	struct lock_stat *st;

	for (i = 0; i < LOCKHASH_SIZE; i++) {
		list_for_each_entry(st, &lockhash_table[i], hash_entry) {
			insert_to_result(st, compare);
		}
	}
}

static const struct perf_evsel_str_handler lock_tracepoints[] = {
	{ "lock:lock_acquire",	 perf_evsel__process_lock_acquire,   }, /* CONFIG_LOCKDEP */
	{ "lock:lock_acquired",	 perf_evsel__process_lock_acquired,  }, /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */
	{ "lock:lock_contended", perf_evsel__process_lock_contended, }, /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */
	{ "lock:lock_release",	 perf_evsel__process_lock_release,   }, /* CONFIG_LOCKDEP */
};
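
/*
 * Report flow: open the perf.data session, attach the lock tracepoint
 * handlers, replay all recorded events to build the stats, then sort
 * and print (or just dump info for the 'info' subcommand).
 */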
static int __cmd_report(bool display_info)
{
	int err = -EINVAL;
	struct perf_tool eops = {
		.sample		 = process_sample_event,
		.comm		 = perf_event__process_comm,
		.ordered_samples = true,
	};

	session = perf_session__new(input_name, O_RDONLY, 0, false, &eops);
	if (!session) {
		pr_err("Initializing perf session failed\n");
		return -ENOMEM;
	}

	if (!perf_session__has_traces(session, "lock record"))
		goto out_delete;

	if (perf_session__set_tracepoints_handlers(session, lock_tracepoints)) {
		pr_err("Initializing perf session tracepoint handlers failed\n");
		goto out_delete;
	}

	if (select_key())
		goto out_delete;

	err = perf_session__process_events(session, &eops);
	if (err)
		goto out_delete;

	setup_pager();
	if (display_info) /* used for info subcommand */
		err = dump_info();
	else {
		sort_result();
		print_result();
	}

out_delete:
	perf_session__delete(session);
	return err;
}
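
/*
 * 'perf lock record' is a thin wrapper around 'perf record': it verifies
 * that the lock tracepoints exist, then prepends "-e <tracepoint>" for
 * each of them to the user's record arguments.
 */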
static int __cmd_record(int argc, const char **argv)
{
	const char *record_args[] = {
		"record", "-R", "-m", "1024", "-c", "1",
	};
	unsigned int rec_argc, i, j, ret;
	const char **rec_argv;

	for (i = 0; i < ARRAY_SIZE(lock_tracepoints); i++) {
		if (!is_valid_tracepoint(lock_tracepoints[i].name)) {
			pr_err("tracepoint %s is not enabled. "
			       "Are CONFIG_LOCKDEP and CONFIG_LOCK_STAT enabled?\n",
			       lock_tracepoints[i].name);
			return 1;
		}
	}

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
	/* factor of 2 is for -e in front of each tracepoint */
	rec_argc += 2 * ARRAY_SIZE(lock_tracepoints);

	rec_argv = calloc(rec_argc + 1, sizeof(char *));
	if (!rec_argv)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 0; j < ARRAY_SIZE(lock_tracepoints); j++) {
		rec_argv[i++] = "-e";
		rec_argv[i++] = strdup(lock_tracepoints[j].name);
	}

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	BUG_ON(i != rec_argc);

	ret = cmd_record(i, rec_argv, NULL);
	free(rec_argv);
	return ret;
}
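
/*
 * Subcommand dispatch: record / report / script (aliased to
 * 'perf script') / info.  'info' reuses the report machinery with
 * display_info == true.
 */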
int cmd_lock(int argc, const char **argv, const char *prefix __maybe_unused)
{
	const struct option info_options[] = {
	OPT_BOOLEAN('t', "threads", &info_threads,
		    "dump thread list in perf.data"),
	OPT_BOOLEAN('m', "map", &info_map,
		    "map of lock instances (address:name table)"),
	OPT_END()
	};
	const struct option lock_options[] = {
	OPT_STRING('i', "input", &input_name, "file", "input file name"),
	OPT_INCR('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"),
	OPT_END()
	};
	const struct option report_options[] = {
	OPT_STRING('k', "key", &sort_key, "acquired",
		   "key for sorting (acquired / contended / avg_wait / wait_total / wait_max / wait_min)"),
	/* TODO: type */
	OPT_END()
	};
	const char * const info_usage[] = {
		"perf lock info [<options>]",
		NULL
	};
	const char * const lock_usage[] = {
		"perf lock [<options>] {record|report|script|info}",
		NULL
	};
	const char * const report_usage[] = {
		"perf lock report [<options>]",
		NULL
	};
	unsigned int i;
	int rc = 0;

	symbol__init();
	for (i = 0; i < LOCKHASH_SIZE; i++)
		INIT_LIST_HEAD(lockhash_table + i);

	argc = parse_options(argc, argv, lock_options, lock_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc)
		usage_with_options(lock_usage, lock_options);

	if (!strncmp(argv[0], "rec", 3)) {
		return __cmd_record(argc, argv);
	} else if (!strncmp(argv[0], "report", 6)) {
		trace_handler = &report_lock_ops;
		if (argc) {
			argc = parse_options(argc, argv,
					     report_options, report_usage, 0);
			if (argc)
				usage_with_options(report_usage, report_options);
		}
		rc = __cmd_report(false);
	} else if (!strcmp(argv[0], "script")) {
		/* Aliased to 'perf script' */
		return cmd_script(argc, argv, prefix);
	} else if (!strcmp(argv[0], "info")) {
		if (argc) {
			argc = parse_options(argc, argv,
					     info_options, info_usage, 0);
			if (argc)
				usage_with_options(info_usage, info_options);
		}
		/* recycling report_lock_ops */
		trace_handler = &report_lock_ops;
		rc = __cmd_report(true);
	} else {
		usage_with_options(lock_usage, lock_options);
	}

	return rc;
}