builtin-timechart.c

/*
 * builtin-timechart.c - make an svg timechart of system activity
 *
 * (C) Copyright 2009 Intel Corporation
 *
 * Authors:
 *	Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include "builtin.h"

#include "util/util.h"
#include "util/color.h"
#include <linux/list.h>
#include "util/cache.h"
#include "util/evsel.h"
#include <linux/rbtree.h>
#include "util/symbol.h"
#include "util/callchain.h"
#include "util/strlist.h"

#include "perf.h"
#include "util/header.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
#include "util/event.h"
#include "util/session.h"
#include "util/svghelper.h"

#define SUPPORT_OLD_POWER_EVENTS 1
#define PWR_EVENT_EXIT -1

static char const *input_name = "perf.data";
static char const *output_name = "output.svg";

static unsigned int numcpus;
static u64 min_freq;	/* Lowest CPU frequency seen */
static u64 max_freq;	/* Highest CPU frequency seen */
static u64 turbo_frequency;

static u64 first_time, last_time;

static bool power_only;

struct per_pid;
struct per_pidcomm;

struct cpu_sample;
struct power_event;
struct wake_event;

struct sample_wrapper;
/*
 * Data structure layout:
 * We keep a list of "pid"s, matching the kernel's notion of a task struct.
 * Each "pid" entry has a list of "comm"s.
 *	this is because we want to track different programs separately, while
 *	exec will reuse the original pid (by design).
 * Each comm has a list of samples that will be used to draw the
 * final graph.
 */
struct per_pid {
	struct per_pid *next;

	int pid;
	int ppid;

	u64 start_time;
	u64 end_time;
	u64 total_time;
	int display;

	struct per_pidcomm *all;
	struct per_pidcomm *current;
};

struct per_pidcomm {
	struct per_pidcomm *next;

	u64 start_time;
	u64 end_time;
	u64 total_time;

	int Y;
	int display;

	long state;
	u64 state_since;

	char *comm;

	struct cpu_sample *samples;
};

struct sample_wrapper {
	struct sample_wrapper *next;

	u64 timestamp;
	unsigned char data[0];
};

#define TYPE_NONE	0
#define TYPE_RUNNING	1
#define TYPE_WAITING	2
#define TYPE_BLOCKED	3

struct cpu_sample {
	struct cpu_sample *next;

	u64 start_time;
	u64 end_time;
	int type;
	int cpu;
};

static struct per_pid *all_data;

#define CSTATE 1
#define PSTATE 2

struct power_event {
	struct power_event *next;
	int type;
	int state;
	u64 start_time;
	u64 end_time;
	int cpu;
};

struct wake_event {
	struct wake_event *next;
	int waker;
	int wakee;
	u64 time;
};

static struct power_event *power_events;
static struct wake_event *wake_events;

struct process_filter;
struct process_filter {
	char *name;
	int pid;
	struct process_filter *next;
};

static struct process_filter *process_filter;
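
/* Look up the per_pid entry for @pid, allocating a new zeroed one if needed. */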
static struct per_pid *find_create_pid(int pid)
{
	struct per_pid *cursor = all_data;

	while (cursor) {
		if (cursor->pid == pid)
			return cursor;
		cursor = cursor->next;
	}
	cursor = malloc(sizeof(struct per_pid));
	assert(cursor != NULL);
	memset(cursor, 0, sizeof(struct per_pid));
	cursor->pid = pid;
	cursor->next = all_data;
	all_data = cursor;
	return cursor;
}
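
/*
 * Record the command name for a pid: reuse an existing per_pidcomm with the
 * same name, fill in an unnamed one, or allocate a fresh entry otherwise.
 */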
static void pid_set_comm(int pid, char *comm)
{
	struct per_pid *p;
	struct per_pidcomm *c;

	p = find_create_pid(pid);
	c = p->all;
	while (c) {
		if (c->comm && strcmp(c->comm, comm) == 0) {
			p->current = c;
			return;
		}
		if (!c->comm) {
			c->comm = strdup(comm);
			p->current = c;
			return;
		}
		c = c->next;
	}
	c = malloc(sizeof(struct per_pidcomm));
	assert(c != NULL);
	memset(c, 0, sizeof(struct per_pidcomm));
	c->comm = strdup(comm);
	p->current = c;
	c->next = p->all;
	p->all = c;
}
static void pid_fork(int pid, int ppid, u64 timestamp)
{
	struct per_pid *p, *pp;

	p = find_create_pid(pid);
	pp = find_create_pid(ppid);
	p->ppid = ppid;
	if (pp->current && pp->current->comm && !p->current)
		pid_set_comm(pid, pp->current->comm);

	p->start_time = timestamp;
	if (p->current) {
		p->current->start_time = timestamp;
		p->current->state_since = timestamp;
	}
}

static void pid_exit(int pid, u64 timestamp)
{
	struct per_pid *p;

	p = find_create_pid(pid);
	p->end_time = timestamp;
	if (p->current)
		p->current->end_time = timestamp;
}
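
/*
 * Attach a scheduling sample of the given type to the pid's current comm,
 * accounting run time and widening the recorded start times when needed.
 */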
static void
pid_put_sample(int pid, int type, unsigned int cpu, u64 start, u64 end)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	struct cpu_sample *sample;

	p = find_create_pid(pid);
	c = p->current;
	if (!c) {
		c = malloc(sizeof(struct per_pidcomm));
		assert(c != NULL);
		memset(c, 0, sizeof(struct per_pidcomm));
		p->current = c;
		c->next = p->all;
		p->all = c;
	}

	sample = malloc(sizeof(struct cpu_sample));
	assert(sample != NULL);
	memset(sample, 0, sizeof(struct cpu_sample));
	sample->start_time = start;
	sample->end_time = end;
	sample->type = type;
	sample->next = c->samples;
	sample->cpu = cpu;
	c->samples = sample;

	if (sample->type == TYPE_RUNNING && end > start && start > 0) {
		c->total_time += (end-start);
		p->total_time += (end-start);
	}

	if (c->start_time == 0 || c->start_time > start)
		c->start_time = start;
	if (p->start_time == 0 || p->start_time > start)
		p->start_time = start;
}

#define MAX_CPUS 4096

static u64 cpus_cstate_start_times[MAX_CPUS];
static int cpus_cstate_state[MAX_CPUS];
static u64 cpus_pstate_start_times[MAX_CPUS];
static u64 cpus_pstate_state[MAX_CPUS];
static int process_comm_event(struct perf_event_ops *ops __used,
			      union perf_event *event,
			      struct perf_sample *sample __used,
			      struct perf_session *session __used)
{
	pid_set_comm(event->comm.tid, event->comm.comm);
	return 0;
}

static int process_fork_event(struct perf_event_ops *ops __used,
			      union perf_event *event,
			      struct perf_sample *sample __used,
			      struct perf_session *session __used)
{
	pid_fork(event->fork.pid, event->fork.ppid, event->fork.time);
	return 0;
}

static int process_exit_event(struct perf_event_ops *ops __used,
			      union perf_event *event,
			      struct perf_sample *sample __used,
			      struct perf_session *session __used)
{
	pid_exit(event->fork.pid, event->fork.time);
	return 0;
}
struct trace_entry {
	unsigned short type;
	unsigned char flags;
	unsigned char preempt_count;
	int pid;
	int lock_depth;
};

#ifdef SUPPORT_OLD_POWER_EVENTS
static int use_old_power_events;
struct power_entry_old {
	struct trace_entry te;
	u64 type;
	u64 value;
	u64 cpu_id;
};
#endif

struct power_processor_entry {
	struct trace_entry te;
	u32 state;
	u32 cpu_id;
};

#define TASK_COMM_LEN 16
struct wakeup_entry {
	struct trace_entry te;
	char comm[TASK_COMM_LEN];
	int pid;
	int prio;
	int success;
};

/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF	   - interrupts were disabled
 *  IRQS_NOSUPPORT - arch does not support irqs_disabled_flags
 *  NEED_RESCHED   - reschedule is requested
 *  HARDIRQ	   - inside an interrupt handler
 *  SOFTIRQ	   - inside a softirq handler
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
};

struct sched_switch {
	struct trace_entry te;
	char prev_comm[TASK_COMM_LEN];
	int prev_pid;
	int prev_prio;
	long prev_state; /* Arjan weeps. */
	char next_comm[TASK_COMM_LEN];
	int next_pid;
	int next_prio;
};
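
/*
 * C-state bookkeeping: c_state_start() notes when a CPU entered an idle
 * state; c_state_end() turns that interval into a CSTATE power_event.
 */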
static void c_state_start(int cpu, u64 timestamp, int state)
{
	cpus_cstate_start_times[cpu] = timestamp;
	cpus_cstate_state[cpu] = state;
}

static void c_state_end(int cpu, u64 timestamp)
{
	struct power_event *pwr;

	pwr = malloc(sizeof(struct power_event));
	if (!pwr)
		return;
	memset(pwr, 0, sizeof(struct power_event));

	pwr->state = cpus_cstate_state[cpu];
	pwr->start_time = cpus_cstate_start_times[cpu];
	pwr->end_time = timestamp;
	pwr->cpu = cpu;
	pwr->type = CSTATE;
	pwr->next = power_events;

	power_events = pwr;
}
static void p_state_change(int cpu, u64 timestamp, u64 new_freq)
{
	struct power_event *pwr;

	if (new_freq > 8000000) /* detect invalid data */
		return;

	pwr = malloc(sizeof(struct power_event));
	if (!pwr)
		return;
	memset(pwr, 0, sizeof(struct power_event));

	pwr->state = cpus_pstate_state[cpu];
	pwr->start_time = cpus_pstate_start_times[cpu];
	pwr->end_time = timestamp;
	pwr->cpu = cpu;
	pwr->type = PSTATE;
	pwr->next = power_events;

	if (!pwr->start_time)
		pwr->start_time = first_time;

	power_events = pwr;

	cpus_pstate_state[cpu] = new_freq;
	cpus_pstate_start_times[cpu] = timestamp;

	if ((u64)new_freq > max_freq)
		max_freq = new_freq;

	if (new_freq < min_freq || min_freq == 0)
		min_freq = new_freq;

	if (new_freq == max_freq - 1000)
		turbo_frequency = max_freq;
}
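
/*
 * Handle a sched_wakeup tracepoint: record the wake edge (waker -1 means the
 * wakeup came from irq context) and move the wakee into the waiting state.
 */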
static void
sched_wakeup(int cpu, u64 timestamp, int pid, struct trace_entry *te)
{
	struct wake_event *we;
	struct per_pid *p;
	struct wakeup_entry *wake = (void *)te;

	we = malloc(sizeof(struct wake_event));
	if (!we)
		return;

	memset(we, 0, sizeof(struct wake_event));
	we->time = timestamp;
	we->waker = pid;

	if ((te->flags & TRACE_FLAG_HARDIRQ) || (te->flags & TRACE_FLAG_SOFTIRQ))
		we->waker = -1;

	we->wakee = wake->pid;
	we->next = wake_events;
	wake_events = we;
	p = find_create_pid(we->wakee);

	if (p && p->current && p->current->state == TYPE_NONE) {
		p->current->state_since = timestamp;
		p->current->state = TYPE_WAITING;
	}
	if (p && p->current && p->current->state == TYPE_BLOCKED) {
		pid_put_sample(p->pid, p->current->state, cpu, p->current->state_since, timestamp);
		p->current->state_since = timestamp;
		p->current->state = TYPE_WAITING;
	}
}
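
/*
 * Handle a sched_switch tracepoint: close the previous task's running sample
 * and start a new state interval for both tasks involved in the switch.
 */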
static void sched_switch(int cpu, u64 timestamp, struct trace_entry *te)
{
	struct per_pid *p = NULL, *prev_p;
	struct sched_switch *sw = (void *)te;

	prev_p = find_create_pid(sw->prev_pid);

	p = find_create_pid(sw->next_pid);

	if (prev_p->current && prev_p->current->state != TYPE_NONE)
		pid_put_sample(sw->prev_pid, TYPE_RUNNING, cpu, prev_p->current->state_since, timestamp);
	if (p && p->current) {
		if (p->current->state != TYPE_NONE)
			pid_put_sample(sw->next_pid, p->current->state, cpu, p->current->state_since, timestamp);

		p->current->state_since = timestamp;
		p->current->state = TYPE_RUNNING;
	}

	if (prev_p->current) {
		prev_p->current->state = TYPE_NONE;
		prev_p->current->state_since = timestamp;
		if (sw->prev_state & 2)
			prev_p->current->state = TYPE_BLOCKED;
		if (sw->prev_state == 0)
			prev_p->current->state = TYPE_WAITING;
	}
}
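
/*
 * Dispatch one perf sample: track the overall trace time window and hand the
 * raw tracepoint data to the power or scheduler handlers by event name.
 */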
static int process_sample_event(struct perf_event_ops *ops __used,
				union perf_event *event __used,
				struct perf_sample *sample,
				struct perf_evsel *evsel,
				struct perf_session *session __used)
{
	struct trace_entry *te;

	if (evsel->attr.sample_type & PERF_SAMPLE_TIME) {
		if (!first_time || first_time > sample->time)
			first_time = sample->time;
		if (last_time < sample->time)
			last_time = sample->time;
	}

	te = (void *)sample->raw_data;
	if ((evsel->attr.sample_type & PERF_SAMPLE_RAW) && sample->raw_size > 0) {
		char *event_str;
#ifdef SUPPORT_OLD_POWER_EVENTS
		struct power_entry_old *peo;
		peo = (void *)te;
#endif
		/*
		 * FIXME: use evsel, it's already mapped from id to perf_evsel,
		 * remove perf_header__find_event infrastructure bits.
		 * Mapping all these "power:cpu_idle" strings to the tracepoint
		 * ID and then just comparing against evsel->attr.config.
		 *
		 * e.g.:
		 *
		 * if (evsel->attr.config == power_cpu_idle_id)
		 */
		event_str = perf_header__find_event(te->type);

		if (!event_str)
			return 0;

		if (sample->cpu > numcpus)
			numcpus = sample->cpu;

		if (strcmp(event_str, "power:cpu_idle") == 0) {
			struct power_processor_entry *ppe = (void *)te;
			if (ppe->state == (u32)PWR_EVENT_EXIT)
				c_state_end(ppe->cpu_id, sample->time);
			else
				c_state_start(ppe->cpu_id, sample->time,
					      ppe->state);
		}
		else if (strcmp(event_str, "power:cpu_frequency") == 0) {
			struct power_processor_entry *ppe = (void *)te;
			p_state_change(ppe->cpu_id, sample->time, ppe->state);
		}

		else if (strcmp(event_str, "sched:sched_wakeup") == 0)
			sched_wakeup(sample->cpu, sample->time, sample->pid, te);

		else if (strcmp(event_str, "sched:sched_switch") == 0)
			sched_switch(sample->cpu, sample->time, te);

#ifdef SUPPORT_OLD_POWER_EVENTS
		if (use_old_power_events) {
			if (strcmp(event_str, "power:power_start") == 0)
				c_state_start(peo->cpu_id, sample->time,
					      peo->value);

			else if (strcmp(event_str, "power:power_end") == 0)
				c_state_end(sample->cpu, sample->time);

			else if (strcmp(event_str,
					"power:power_frequency") == 0)
				p_state_change(peo->cpu_id, sample->time,
					       peo->value);
		}
#endif
	}
	return 0;
}
/*
 * After the last sample we need to wrap up the current C/P state
 * and close out each CPU for these.
 */
static void end_sample_processing(void)
{
	u64 cpu;
	struct power_event *pwr;

	for (cpu = 0; cpu <= numcpus; cpu++) {
		pwr = malloc(sizeof(struct power_event));
		if (!pwr)
			return;
		memset(pwr, 0, sizeof(struct power_event));

		/* C state */
#if 0
		pwr->state = cpus_cstate_state[cpu];
		pwr->start_time = cpus_cstate_start_times[cpu];
		pwr->end_time = last_time;
		pwr->cpu = cpu;
		pwr->type = CSTATE;
		pwr->next = power_events;

		power_events = pwr;
#endif
		/* P state */

		pwr = malloc(sizeof(struct power_event));
		if (!pwr)
			return;
		memset(pwr, 0, sizeof(struct power_event));

		pwr->state = cpus_pstate_state[cpu];
		pwr->start_time = cpus_pstate_start_times[cpu];
		pwr->end_time = last_time;
		pwr->cpu = cpu;
		pwr->type = PSTATE;
		pwr->next = power_events;

		if (!pwr->start_time)
			pwr->start_time = first_time;
		if (!pwr->state)
			pwr->state = min_freq;
		power_events = pwr;
	}
}
/*
 * Sort the pid datastructure
 */
static void sort_pids(void)
{
	struct per_pid *new_list, *p, *cursor, *prev;
	/* sort by ppid first, then by pid, lowest to highest */

	new_list = NULL;

	while (all_data) {
		p = all_data;
		all_data = p->next;
		p->next = NULL;

		if (new_list == NULL) {
			new_list = p;
			p->next = NULL;
			continue;
		}
		prev = NULL;
		cursor = new_list;
		while (cursor) {
			if (cursor->ppid > p->ppid ||
			    (cursor->ppid == p->ppid && cursor->pid > p->pid)) {
				/* must insert before */
				if (prev) {
					p->next = prev->next;
					prev->next = p;
					cursor = NULL;
					continue;
				} else {
					p->next = new_list;
					new_list = p;
					cursor = NULL;
					continue;
				}
			}
			prev = cursor;
			cursor = cursor->next;
			if (!cursor)
				prev->next = p;
		}
	}
	all_data = new_list;
}
static void draw_c_p_states(void)
{
	struct power_event *pwr;
	pwr = power_events;

	/*
	 * two pass drawing so that the P state bars are on top of the C state blocks
	 */
	while (pwr) {
		if (pwr->type == CSTATE)
			svg_cstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
		pwr = pwr->next;
	}

	pwr = power_events;
	while (pwr) {
		if (pwr->type == PSTATE) {
			if (!pwr->state)
				pwr->state = min_freq;
			svg_pstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
		}
		pwr = pwr->next;
	}
}
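
/*
 * Draw wakeup arrows: find the bar (Y position) of both waker and wakee at
 * the wakeup time and emit the matching SVG arrow or interrupt marker.
 */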
static void draw_wakeups(void)
{
	struct wake_event *we;
	struct per_pid *p;
	struct per_pidcomm *c;

	we = wake_events;
	while (we) {
		int from = 0, to = 0;
		char *task_from = NULL, *task_to = NULL;

		/* locate the column of the waker and wakee */
		p = all_data;
		while (p) {
			if (p->pid == we->waker || p->pid == we->wakee) {
				c = p->all;
				while (c) {
					if (c->Y && c->start_time <= we->time && c->end_time >= we->time) {
						if (p->pid == we->waker && !from) {
							from = c->Y;
							task_from = strdup(c->comm);
						}
						if (p->pid == we->wakee && !to) {
							to = c->Y;
							task_to = strdup(c->comm);
						}
					}
					c = c->next;
				}
				c = p->all;
				while (c) {
					if (p->pid == we->waker && !from) {
						from = c->Y;
						task_from = strdup(c->comm);
					}
					if (p->pid == we->wakee && !to) {
						to = c->Y;
						task_to = strdup(c->comm);
					}
					c = c->next;
				}
			}
			p = p->next;
		}

		if (!task_from) {
			task_from = malloc(40);
			sprintf(task_from, "[%i]", we->waker);
		}
		if (!task_to) {
			task_to = malloc(40);
			sprintf(task_to, "[%i]", we->wakee);
		}

		if (we->waker == -1)
			svg_interrupt(we->time, to);
		else if (from && to && abs(from - to) == 1)
			svg_wakeline(we->time, from, to);
		else
			svg_partial_wakeline(we->time, from, task_from, to, task_to);
		we = we->next;

		free(task_from);
		free(task_to);
	}
}
static void draw_cpu_usage(void)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	struct cpu_sample *sample;
	p = all_data;
	while (p) {
		c = p->all;
		while (c) {
			sample = c->samples;
			while (sample) {
				if (sample->type == TYPE_RUNNING)
					svg_process(sample->cpu, sample->start_time, sample->end_time, "sample", c->comm);

				sample = sample->next;
			}
			c = c->next;
		}
		p = p->next;
	}
}
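
/*
 * Draw one horizontal bar per displayed comm, below the per-CPU rows,
 * showing running/blocked/waiting intervals and a label with the run time.
 */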
static void draw_process_bars(void)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	struct cpu_sample *sample;
	int Y = 0;

	Y = 2 * numcpus + 2;

	p = all_data;
	while (p) {
		c = p->all;
		while (c) {
			if (!c->display) {
				c->Y = 0;
				c = c->next;
				continue;
			}

			svg_box(Y, c->start_time, c->end_time, "process");
			sample = c->samples;
			while (sample) {
				if (sample->type == TYPE_RUNNING)
					svg_sample(Y, sample->cpu, sample->start_time, sample->end_time);
				if (sample->type == TYPE_BLOCKED)
					svg_box(Y, sample->start_time, sample->end_time, "blocked");
				if (sample->type == TYPE_WAITING)
					svg_waiting(Y, sample->start_time, sample->end_time);
				sample = sample->next;
			}

			if (c->comm) {
				char comm[256];
				if (c->total_time > 5000000000) /* 5 seconds */
					sprintf(comm, "%s:%i (%2.2fs)", c->comm, p->pid, c->total_time / 1000000000.0);
				else
					sprintf(comm, "%s:%i (%3.1fms)", c->comm, p->pid, c->total_time / 1000000.0);

				svg_text(Y, c->start_time, comm);
			}
			c->Y = Y;
			Y++;
			c = c->next;
		}
		p = p->next;
	}
}
static void add_process_filter(const char *string)
{
	struct process_filter *filt;
	int pid;

	pid = strtoull(string, NULL, 10);
	filt = malloc(sizeof(struct process_filter));
	if (!filt)
		return;

	filt->name = strdup(string);
	filt->pid = pid;
	filt->next = process_filter;

	process_filter = filt;
}

static int passes_filter(struct per_pid *p, struct per_pidcomm *c)
{
	struct process_filter *filt;
	if (!process_filter)
		return 1;

	filt = process_filter;
	while (filt) {
		if (filt->pid && p->pid == filt->pid)
			return 1;
		if (strcmp(filt->name, c->comm) == 0)
			return 1;
		filt = filt->next;
	}
	return 0;
}
static int determine_display_tasks_filtered(void)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	int count = 0;

	p = all_data;
	while (p) {
		p->display = 0;
		if (p->start_time == 1)
			p->start_time = first_time;

		/* no exit marker, task kept running to the end */
		if (p->end_time == 0)
			p->end_time = last_time;

		c = p->all;
		while (c) {
			c->display = 0;

			if (c->start_time == 1)
				c->start_time = first_time;

			if (passes_filter(p, c)) {
				c->display = 1;
				p->display = 1;
				count++;
			}

			if (c->end_time == 0)
				c->end_time = last_time;

			c = c->next;
		}
		p = p->next;
	}
	return count;
}
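
/*
 * Decide which tasks get their own bar: either everything that matches the
 * -p filter, or every task whose accumulated run time exceeds the threshold.
 */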
static int determine_display_tasks(u64 threshold)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	int count = 0;

	if (process_filter)
		return determine_display_tasks_filtered();

	p = all_data;
	while (p) {
		p->display = 0;
		if (p->start_time == 1)
			p->start_time = first_time;

		/* no exit marker, task kept running to the end */
		if (p->end_time == 0)
			p->end_time = last_time;

		if (p->total_time >= threshold && !power_only)
			p->display = 1;

		c = p->all;

		while (c) {
			c->display = 0;

			if (c->start_time == 1)
				c->start_time = first_time;

			if (c->total_time >= threshold && !power_only) {
				c->display = 1;
				count++;
			}

			if (c->end_time == 0)
				c->end_time = last_time;

			c = c->next;
		}
		p = p->next;
	}
	return count;
}
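
/*
 * Emit the final SVG: per-CPU C/P-state rows first, then the process bars
 * and wakeup arrows, sized to the set of tasks selected above.
 */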
#define TIME_THRESH 10000000

static void write_svg_file(const char *filename)
{
	u64 i;
	int count;

	numcpus++;

	count = determine_display_tasks(TIME_THRESH);

	/* We'd like to show at least 15 tasks; be less picky if we have fewer */
	if (count < 15)
		count = determine_display_tasks(TIME_THRESH / 10);

	open_svg(filename, numcpus, count, first_time, last_time);

	svg_time_grid();
	svg_legenda();

	for (i = 0; i < numcpus; i++)
		svg_cpu_box(i, max_freq, turbo_frequency);

	draw_cpu_usage();
	draw_process_bars();
	draw_c_p_states();
	draw_wakeups();

	svg_close();
}
static struct perf_event_ops event_ops = {
	.comm			= process_comm_event,
	.fork			= process_fork_event,
	.exit			= process_exit_event,
	.sample			= process_sample_event,
	.ordered_samples	= true,
};

static int __cmd_timechart(void)
{
	struct perf_session *session = perf_session__new(input_name, O_RDONLY,
							 0, false, &event_ops);
	int ret = -EINVAL;

	if (session == NULL)
		return -ENOMEM;

	if (!perf_session__has_traces(session, "timechart record"))
		goto out_delete;

	ret = perf_session__process_events(session, &event_ops);
	if (ret)
		goto out_delete;

	end_sample_processing();

	sort_pids();

	write_svg_file(output_name);

	pr_info("Written %2.1f seconds of trace to %s.\n",
		(last_time - first_time) / 1000000000.0, output_name);
out_delete:
	perf_session__delete(session);
	return ret;
}
static const char * const timechart_usage[] = {
	"perf timechart [<options>] {record}",
	NULL
};

#ifdef SUPPORT_OLD_POWER_EVENTS
static const char * const record_old_args[] = {
	"record",
	"-a",
	"-R",
	"-f",
	"-c", "1",
	"-e", "power:power_start",
	"-e", "power:power_end",
	"-e", "power:power_frequency",
	"-e", "sched:sched_wakeup",
	"-e", "sched:sched_switch",
};
#endif

static const char * const record_new_args[] = {
	"record",
	"-a",
	"-R",
	"-f",
	"-c", "1",
	"-e", "power:cpu_frequency",
	"-e", "power:cpu_idle",
	"-e", "sched:sched_wakeup",
	"-e", "sched:sched_switch",
};
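
/*
 * Build the argument list for 'perf record', preferring the new power
 * tracepoints and falling back to the old ones on older kernels.
 */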
static int __cmd_record(int argc, const char **argv)
{
	unsigned int rec_argc, i, j;
	const char **rec_argv;
	const char * const *record_args = record_new_args;
	unsigned int record_elems = ARRAY_SIZE(record_new_args);

#ifdef SUPPORT_OLD_POWER_EVENTS
	if (!is_valid_tracepoint("power:cpu_idle") &&
	    is_valid_tracepoint("power:power_start")) {
		use_old_power_events = 1;
		record_args = record_old_args;
		record_elems = ARRAY_SIZE(record_old_args);
	}
#endif

	rec_argc = record_elems + argc - 1;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	if (rec_argv == NULL)
		return -ENOMEM;

	for (i = 0; i < record_elems; i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	return cmd_record(i, rec_argv, NULL);
}
static int
parse_process(const struct option *opt __used, const char *arg, int __used unset)
{
	if (arg)
		add_process_filter(arg);
	return 0;
}

static const struct option options[] = {
	OPT_STRING('i', "input", &input_name, "file",
		    "input file name"),
	OPT_STRING('o', "output", &output_name, "file",
		    "output file name"),
	OPT_INTEGER('w', "width", &svg_page_width,
		    "page width"),
	OPT_BOOLEAN('P', "power-only", &power_only,
		    "output power data only"),
	OPT_CALLBACK('p', "process", NULL, "process",
		      "process selector. Pass a pid or process name.",
		       parse_process),
	OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
		    "Look for files with symbols relative to this directory"),
	OPT_END()
};
int cmd_timechart(int argc, const char **argv, const char *prefix __used)
{
	argc = parse_options(argc, argv, options, timechart_usage,
			PARSE_OPT_STOP_AT_NON_OPTION);

	symbol__init();

	if (argc && !strncmp(argv[0], "rec", 3))
		return __cmd_record(argc, argv);
	else if (argc)
		usage_with_options(timechart_usage, options);

	setup_pager();

	return __cmd_timechart();
}