builtin-timechart.c

/*
 * builtin-timechart.c - make an svg timechart of system activity
 *
 * (C) Copyright 2009 Intel Corporation
 *
 * Authors:
 *     Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <traceevent/event-parse.h>
#include "builtin.h"
#include "util/util.h"
#include "util/color.h"
#include <linux/list.h>
#include "util/cache.h"
#include "util/evsel.h"
#include <linux/rbtree.h>
#include "util/symbol.h"
#include "util/callchain.h"
#include "util/strlist.h"
#include "perf.h"
#include "util/header.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
#include "util/event.h"
#include "util/session.h"
#include "util/svghelper.h"
#include "util/tool.h"

#define SUPPORT_OLD_POWER_EVENTS 1
#define PWR_EVENT_EXIT -1

static unsigned int numcpus;
static u64 min_freq;	/* Lowest CPU frequency seen */
static u64 max_freq;	/* Highest CPU frequency seen */
static u64 turbo_frequency;

static u64 first_time, last_time;

static bool power_only;

struct per_pid;
struct per_pidcomm;
struct cpu_sample;
struct power_event;
struct wake_event;
struct sample_wrapper;
/*
 * Datastructure layout:
 * We keep a list of "pid"s, matching the kernel's notion of a task struct.
 * Each "pid" entry has a list of "comm"s.
 * This is because we want to track different programs separately, while
 * exec will reuse the original pid (by design).
 * Each comm has a list of samples that will be used to draw the
 * final graph.
 */
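
/*
 * Rough sketch of the lists described above (added illustration only; the
 * struct definitions below are the authoritative layout):
 *
 *   all_data -> per_pid(pid A) -> per_pid(pid B) -> ...
 *                   |
 *                   +-> all: per_pidcomm("comm1") -> per_pidcomm("comm2") -> ...
 *                                |
 *                                +-> samples: cpu_sample -> cpu_sample -> ...
 *                                             (start_time, end_time, type, cpu)
 */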
struct per_pid {
	struct per_pid *next;
	int pid;
	int ppid;
	u64 start_time;
	u64 end_time;
	u64 total_time;
	int display;
	struct per_pidcomm *all;
	struct per_pidcomm *current;
};

struct per_pidcomm {
	struct per_pidcomm *next;
	u64 start_time;
	u64 end_time;
	u64 total_time;
	int Y;
	int display;
	long state;
	u64 state_since;
	char *comm;
	struct cpu_sample *samples;
};

struct sample_wrapper {
	struct sample_wrapper *next;
	u64 timestamp;
	unsigned char data[0];
};

#define TYPE_NONE	0
#define TYPE_RUNNING	1
#define TYPE_WAITING	2
#define TYPE_BLOCKED	3

struct cpu_sample {
	struct cpu_sample *next;
	u64 start_time;
	u64 end_time;
	int type;
	int cpu;
};

static struct per_pid *all_data;

#define CSTATE 1
#define PSTATE 2

struct power_event {
	struct power_event *next;
	int type;
	int state;
	u64 start_time;
	u64 end_time;
	int cpu;
};

struct wake_event {
	struct wake_event *next;
	int waker;
	int wakee;
	u64 time;
};

static struct power_event *power_events;
static struct wake_event *wake_events;

struct process_filter;
struct process_filter {
	char *name;
	int pid;
	struct process_filter *next;
};

static struct process_filter *process_filter;
static struct per_pid *find_create_pid(int pid)
{
	struct per_pid *cursor = all_data;

	while (cursor) {
		if (cursor->pid == pid)
			return cursor;
		cursor = cursor->next;
	}
	cursor = zalloc(sizeof(*cursor));
	assert(cursor != NULL);
	cursor->pid = pid;
	cursor->next = all_data;
	all_data = cursor;
	return cursor;
}

static void pid_set_comm(int pid, char *comm)
{
	struct per_pid *p;
	struct per_pidcomm *c;

	p = find_create_pid(pid);
	c = p->all;
	while (c) {
		if (c->comm && strcmp(c->comm, comm) == 0) {
			p->current = c;
			return;
		}
		if (!c->comm) {
			c->comm = strdup(comm);
			p->current = c;
			return;
		}
		c = c->next;
	}
	c = zalloc(sizeof(*c));
	assert(c != NULL);
	c->comm = strdup(comm);
	p->current = c;
	c->next = p->all;
	p->all = c;
}

static void pid_fork(int pid, int ppid, u64 timestamp)
{
	struct per_pid *p, *pp;

	p = find_create_pid(pid);
	pp = find_create_pid(ppid);
	p->ppid = ppid;
	if (pp->current && pp->current->comm && !p->current)
		pid_set_comm(pid, pp->current->comm);

	p->start_time = timestamp;
	if (p->current) {
		p->current->start_time = timestamp;
		p->current->state_since = timestamp;
	}
}

static void pid_exit(int pid, u64 timestamp)
{
	struct per_pid *p;

	p = find_create_pid(pid);
	p->end_time = timestamp;
	if (p->current)
		p->current->end_time = timestamp;
}

static void
pid_put_sample(int pid, int type, unsigned int cpu, u64 start, u64 end)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	struct cpu_sample *sample;

	p = find_create_pid(pid);
	c = p->current;
	if (!c) {
		c = zalloc(sizeof(*c));
		assert(c != NULL);
		p->current = c;
		c->next = p->all;
		p->all = c;
	}

	sample = zalloc(sizeof(*sample));
	assert(sample != NULL);
	sample->start_time = start;
	sample->end_time = end;
	sample->type = type;
	sample->next = c->samples;
	sample->cpu = cpu;
	c->samples = sample;

	if (sample->type == TYPE_RUNNING && end > start && start > 0) {
		c->total_time += (end-start);
		p->total_time += (end-start);
	}

	if (c->start_time == 0 || c->start_time > start)
		c->start_time = start;
	if (p->start_time == 0 || p->start_time > start)
		p->start_time = start;
}

#define MAX_CPUS 4096

static u64 cpus_cstate_start_times[MAX_CPUS];
static int cpus_cstate_state[MAX_CPUS];
static u64 cpus_pstate_start_times[MAX_CPUS];
static u64 cpus_pstate_state[MAX_CPUS];
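
/*
 * Added note on the arrays above: C- and P-state changes arrive as single
 * events, so the tool keeps, per CPU, the timestamp and state recorded when
 * the last change began. When the next change (or the end of the trace) is
 * seen, that open interval is turned into a struct power_event and prepended
 * to power_events; see c_state_start()/c_state_end(), p_state_change() and
 * end_sample_processing() below.
 */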
static int process_comm_event(struct perf_tool *tool __maybe_unused,
			      union perf_event *event,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	pid_set_comm(event->comm.tid, event->comm.comm);
	return 0;
}

static int process_fork_event(struct perf_tool *tool __maybe_unused,
			      union perf_event *event,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	pid_fork(event->fork.pid, event->fork.ppid, event->fork.time);
	return 0;
}

static int process_exit_event(struct perf_tool *tool __maybe_unused,
			      union perf_event *event,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	pid_exit(event->fork.pid, event->fork.time);
	return 0;
}

struct trace_entry {
	unsigned short type;
	unsigned char flags;
	unsigned char preempt_count;
	int pid;
	int lock_depth;
};

#ifdef SUPPORT_OLD_POWER_EVENTS
static int use_old_power_events;
struct power_entry_old {
	struct trace_entry te;
	u64 type;
	u64 value;
	u64 cpu_id;
};
#endif

struct power_processor_entry {
	struct trace_entry te;
	u32 state;
	u32 cpu_id;
};

#define TASK_COMM_LEN 16

struct wakeup_entry {
	struct trace_entry te;
	char comm[TASK_COMM_LEN];
	int pid;
	int prio;
	int success;
};

struct sched_switch {
	struct trace_entry te;
	char prev_comm[TASK_COMM_LEN];
	int prev_pid;
	int prev_prio;
	long prev_state; /* Arjan weeps. */
	char next_comm[TASK_COMM_LEN];
	int next_pid;
	int next_prio;
};
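
/*
 * Added note: the structs above are hand-written mirrors of the raw
 * tracepoint record layouts (a struct trace_entry header followed by the
 * event's own fields). process_sample_event() below relies on this and
 * simply casts sample->raw_data to the matching struct, so these
 * definitions have to stay in sync with the kernel's event formats.
 */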
static void c_state_start(int cpu, u64 timestamp, int state)
{
	cpus_cstate_start_times[cpu] = timestamp;
	cpus_cstate_state[cpu] = state;
}

static void c_state_end(int cpu, u64 timestamp)
{
	struct power_event *pwr = zalloc(sizeof(*pwr));

	if (!pwr)
		return;

	pwr->state = cpus_cstate_state[cpu];
	pwr->start_time = cpus_cstate_start_times[cpu];
	pwr->end_time = timestamp;
	pwr->cpu = cpu;
	pwr->type = CSTATE;
	pwr->next = power_events;

	power_events = pwr;
}

static void p_state_change(int cpu, u64 timestamp, u64 new_freq)
{
	struct power_event *pwr;

	if (new_freq > 8000000) /* detect invalid data */
		return;

	pwr = zalloc(sizeof(*pwr));
	if (!pwr)
		return;

	pwr->state = cpus_pstate_state[cpu];
	pwr->start_time = cpus_pstate_start_times[cpu];
	pwr->end_time = timestamp;
	pwr->cpu = cpu;
	pwr->type = PSTATE;
	pwr->next = power_events;

	if (!pwr->start_time)
		pwr->start_time = first_time;

	power_events = pwr;

	cpus_pstate_state[cpu] = new_freq;
	cpus_pstate_start_times[cpu] = timestamp;

	if ((u64)new_freq > max_freq)
		max_freq = new_freq;

	if (new_freq < min_freq || min_freq == 0)
		min_freq = new_freq;

	if (new_freq == max_freq - 1000)
		turbo_frequency = max_freq;
}

static void
sched_wakeup(int cpu, u64 timestamp, int pid, struct trace_entry *te)
{
	struct per_pid *p;
	struct wakeup_entry *wake = (void *)te;
	struct wake_event *we = zalloc(sizeof(*we));

	if (!we)
		return;

	we->time = timestamp;
	we->waker = pid;

	if ((te->flags & TRACE_FLAG_HARDIRQ) || (te->flags & TRACE_FLAG_SOFTIRQ))
		we->waker = -1;

	we->wakee = wake->pid;
	we->next = wake_events;
	wake_events = we;
	p = find_create_pid(we->wakee);

	if (p && p->current && p->current->state == TYPE_NONE) {
		p->current->state_since = timestamp;
		p->current->state = TYPE_WAITING;
	}
	if (p && p->current && p->current->state == TYPE_BLOCKED) {
		pid_put_sample(p->pid, p->current->state, cpu, p->current->state_since, timestamp);
		p->current->state_since = timestamp;
		p->current->state = TYPE_WAITING;
	}
}

static void sched_switch(int cpu, u64 timestamp, struct trace_entry *te)
{
	struct per_pid *p = NULL, *prev_p;
	struct sched_switch *sw = (void *)te;

	prev_p = find_create_pid(sw->prev_pid);

	p = find_create_pid(sw->next_pid);

	if (prev_p->current && prev_p->current->state != TYPE_NONE)
		pid_put_sample(sw->prev_pid, TYPE_RUNNING, cpu, prev_p->current->state_since, timestamp);
	if (p && p->current) {
		if (p->current->state != TYPE_NONE)
			pid_put_sample(sw->next_pid, p->current->state, cpu, p->current->state_since, timestamp);

		p->current->state_since = timestamp;
		p->current->state = TYPE_RUNNING;
	}

	if (prev_p->current) {
		prev_p->current->state = TYPE_NONE;
		prev_p->current->state_since = timestamp;
		if (sw->prev_state & 2)
			prev_p->current->state = TYPE_BLOCKED;
		if (sw->prev_state == 0)
			prev_p->current->state = TYPE_WAITING;
	}
}
static int process_sample_event(struct perf_tool *tool __maybe_unused,
				union perf_event *event __maybe_unused,
				struct perf_sample *sample,
				struct perf_evsel *evsel,
				struct machine *machine __maybe_unused)
{
	struct trace_entry *te;

	if (evsel->attr.sample_type & PERF_SAMPLE_TIME) {
		if (!first_time || first_time > sample->time)
			first_time = sample->time;
		if (last_time < sample->time)
			last_time = sample->time;
	}

	te = (void *)sample->raw_data;
	if ((evsel->attr.sample_type & PERF_SAMPLE_RAW) && sample->raw_size > 0) {
		char *event_str;
#ifdef SUPPORT_OLD_POWER_EVENTS
		struct power_entry_old *peo;
		peo = (void *)te;
#endif
		/*
		 * FIXME: use evsel, it is already mapped from id to perf_evsel,
		 * and remove the perf_header__find_event infrastructure bits.
		 * Map all these "power:cpu_idle" strings to the tracepoint ID
		 * once and then just compare against evsel->attr.config.
		 *
		 * e.g.:
		 *
		 * if (evsel->attr.config == power_cpu_idle_id)
		 */
		event_str = perf_header__find_event(te->type);

		if (!event_str)
			return 0;

		if (sample->cpu > numcpus)
			numcpus = sample->cpu;

		if (strcmp(event_str, "power:cpu_idle") == 0) {
			struct power_processor_entry *ppe = (void *)te;
			if (ppe->state == (u32)PWR_EVENT_EXIT)
				c_state_end(ppe->cpu_id, sample->time);
			else
				c_state_start(ppe->cpu_id, sample->time,
					      ppe->state);
		}
		else if (strcmp(event_str, "power:cpu_frequency") == 0) {
			struct power_processor_entry *ppe = (void *)te;
			p_state_change(ppe->cpu_id, sample->time, ppe->state);
		}

		else if (strcmp(event_str, "sched:sched_wakeup") == 0)
			sched_wakeup(sample->cpu, sample->time, sample->pid, te);

		else if (strcmp(event_str, "sched:sched_switch") == 0)
			sched_switch(sample->cpu, sample->time, te);

#ifdef SUPPORT_OLD_POWER_EVENTS
		if (use_old_power_events) {
			if (strcmp(event_str, "power:power_start") == 0)
				c_state_start(peo->cpu_id, sample->time,
					      peo->value);

			else if (strcmp(event_str, "power:power_end") == 0)
				c_state_end(sample->cpu, sample->time);

			else if (strcmp(event_str,
					"power:power_frequency") == 0)
				p_state_change(peo->cpu_id, sample->time,
					       peo->value);
		}
#endif
	}
	return 0;
}
/*
 * After the last sample we need to wrap up the current C/P state
 * and close out each CPU for these.
 */
static void end_sample_processing(void)
{
	u64 cpu;
	struct power_event *pwr;

	for (cpu = 0; cpu <= numcpus; cpu++) {
		/* C state */
#if 0
		pwr = zalloc(sizeof(*pwr));
		if (!pwr)
			return;

		pwr->state = cpus_cstate_state[cpu];
		pwr->start_time = cpus_cstate_start_times[cpu];
		pwr->end_time = last_time;
		pwr->cpu = cpu;
		pwr->type = CSTATE;
		pwr->next = power_events;

		power_events = pwr;
#endif
		/* P state */

		pwr = zalloc(sizeof(*pwr));
		if (!pwr)
			return;

		pwr->state = cpus_pstate_state[cpu];
		pwr->start_time = cpus_pstate_start_times[cpu];
		pwr->end_time = last_time;
		pwr->cpu = cpu;
		pwr->type = PSTATE;
		pwr->next = power_events;

		if (!pwr->start_time)
			pwr->start_time = first_time;
		if (!pwr->state)
			pwr->state = min_freq;
		power_events = pwr;
	}
}
/*
 * Sort the pid datastructure
 */
static void sort_pids(void)
{
	struct per_pid *new_list, *p, *cursor, *prev;
	/* sort by ppid first, then by pid, lowest to highest */

	new_list = NULL;

	while (all_data) {
		p = all_data;
		all_data = p->next;
		p->next = NULL;

		if (new_list == NULL) {
			new_list = p;
			p->next = NULL;
			continue;
		}
		prev = NULL;
		cursor = new_list;
		while (cursor) {
			if (cursor->ppid > p->ppid ||
			    (cursor->ppid == p->ppid && cursor->pid > p->pid)) {
				/* must insert before */
				if (prev) {
					p->next = prev->next;
					prev->next = p;
					cursor = NULL;
					continue;
				} else {
					p->next = new_list;
					new_list = p;
					cursor = NULL;
					continue;
				}
			}
			prev = cursor;
			cursor = cursor->next;
			if (!cursor)
				prev->next = p;
		}
	}
	all_data = new_list;
}
static void draw_c_p_states(void)
{
	struct power_event *pwr;
	pwr = power_events;

	/*
	 * two pass drawing so that the P state bars are on top of the C state blocks
	 */
	while (pwr) {
		if (pwr->type == CSTATE)
			svg_cstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
		pwr = pwr->next;
	}

	pwr = power_events;
	while (pwr) {
		if (pwr->type == PSTATE) {
			if (!pwr->state)
				pwr->state = min_freq;
			svg_pstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
		}
		pwr = pwr->next;
	}
}
static void draw_wakeups(void)
{
	struct wake_event *we;
	struct per_pid *p;
	struct per_pidcomm *c;

	we = wake_events;
	while (we) {
		int from = 0, to = 0;
		char *task_from = NULL, *task_to = NULL;

		/* locate the column of the waker and wakee */
		p = all_data;
		while (p) {
			if (p->pid == we->waker || p->pid == we->wakee) {
				c = p->all;
				while (c) {
					if (c->Y && c->start_time <= we->time && c->end_time >= we->time) {
						if (p->pid == we->waker && !from) {
							from = c->Y;
							task_from = strdup(c->comm);
						}
						if (p->pid == we->wakee && !to) {
							to = c->Y;
							task_to = strdup(c->comm);
						}
					}
					c = c->next;
				}
				c = p->all;
				while (c) {
					if (p->pid == we->waker && !from) {
						from = c->Y;
						task_from = strdup(c->comm);
					}
					if (p->pid == we->wakee && !to) {
						to = c->Y;
						task_to = strdup(c->comm);
					}
					c = c->next;
				}
			}
			p = p->next;
		}

		if (!task_from) {
			task_from = malloc(40);
			sprintf(task_from, "[%i]", we->waker);
		}
		if (!task_to) {
			task_to = malloc(40);
			sprintf(task_to, "[%i]", we->wakee);
		}

		if (we->waker == -1)
			svg_interrupt(we->time, to);
		else if (from && to && abs(from - to) == 1)
			svg_wakeline(we->time, from, to);
		else
			svg_partial_wakeline(we->time, from, task_from, to, task_to);
		we = we->next;

		free(task_from);
		free(task_to);
	}
}
static void draw_cpu_usage(void)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	struct cpu_sample *sample;
	p = all_data;
	while (p) {
		c = p->all;
		while (c) {
			sample = c->samples;
			while (sample) {
				if (sample->type == TYPE_RUNNING)
					svg_process(sample->cpu, sample->start_time, sample->end_time, "sample", c->comm);

				sample = sample->next;
			}
			c = c->next;
		}
		p = p->next;
	}
}

static void draw_process_bars(void)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	struct cpu_sample *sample;
	int Y = 0;

	Y = 2 * numcpus + 2;

	p = all_data;
	while (p) {
		c = p->all;
		while (c) {
			if (!c->display) {
				c->Y = 0;
				c = c->next;
				continue;
			}

			svg_box(Y, c->start_time, c->end_time, "process");
			sample = c->samples;
			while (sample) {
				if (sample->type == TYPE_RUNNING)
					svg_sample(Y, sample->cpu, sample->start_time, sample->end_time);
				if (sample->type == TYPE_BLOCKED)
					svg_box(Y, sample->start_time, sample->end_time, "blocked");
				if (sample->type == TYPE_WAITING)
					svg_waiting(Y, sample->start_time, sample->end_time);
				sample = sample->next;
			}

			if (c->comm) {
				char comm[256];
				if (c->total_time > 5000000000) /* 5 seconds */
					sprintf(comm, "%s:%i (%2.2fs)", c->comm, p->pid, c->total_time / 1000000000.0);
				else
					sprintf(comm, "%s:%i (%3.1fms)", c->comm, p->pid, c->total_time / 1000000.0);

				svg_text(Y, c->start_time, comm);
			}
			c->Y = Y;
			Y++;
			c = c->next;
		}
		p = p->next;
	}
}
static void add_process_filter(const char *string)
{
	int pid = strtoull(string, NULL, 10);
	struct process_filter *filt = malloc(sizeof(*filt));

	if (!filt)
		return;

	filt->name = strdup(string);
	filt->pid = pid;
	filt->next = process_filter;

	process_filter = filt;
}

static int passes_filter(struct per_pid *p, struct per_pidcomm *c)
{
	struct process_filter *filt;
	if (!process_filter)
		return 1;

	filt = process_filter;
	while (filt) {
		if (filt->pid && p->pid == filt->pid)
			return 1;
		if (strcmp(filt->name, c->comm) == 0)
			return 1;
		filt = filt->next;
	}
	return 0;
}

static int determine_display_tasks_filtered(void)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	int count = 0;

	p = all_data;
	while (p) {
		p->display = 0;
		if (p->start_time == 1)
			p->start_time = first_time;

		/* no exit marker, task kept running to the end */
		if (p->end_time == 0)
			p->end_time = last_time;

		c = p->all;
		while (c) {
			c->display = 0;

			if (c->start_time == 1)
				c->start_time = first_time;

			if (passes_filter(p, c)) {
				c->display = 1;
				p->display = 1;
				count++;
			}

			if (c->end_time == 0)
				c->end_time = last_time;

			c = c->next;
		}
		p = p->next;
	}
	return count;
}

static int determine_display_tasks(u64 threshold)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	int count = 0;

	if (process_filter)
		return determine_display_tasks_filtered();

	p = all_data;
	while (p) {
		p->display = 0;
		if (p->start_time == 1)
			p->start_time = first_time;

		/* no exit marker, task kept running to the end */
		if (p->end_time == 0)
			p->end_time = last_time;

		if (p->total_time >= threshold && !power_only)
			p->display = 1;

		c = p->all;
		while (c) {
			c->display = 0;

			if (c->start_time == 1)
				c->start_time = first_time;

			if (c->total_time >= threshold && !power_only) {
				c->display = 1;
				count++;
			}

			if (c->end_time == 0)
				c->end_time = last_time;

			c = c->next;
		}
		p = p->next;
	}
	return count;
}
#define TIME_THRESH 10000000

static void write_svg_file(const char *filename)
{
	u64 i;
	int count;

	numcpus++;

	count = determine_display_tasks(TIME_THRESH);

	/* We'd like to show at least 15 tasks; be less picky if we have fewer */
	if (count < 15)
		count = determine_display_tasks(TIME_THRESH / 10);

	open_svg(filename, numcpus, count, first_time, last_time);

	svg_time_grid();
	svg_legenda();

	for (i = 0; i < numcpus; i++)
		svg_cpu_box(i, max_freq, turbo_frequency);

	draw_cpu_usage();
	draw_process_bars();
	draw_c_p_states();
	draw_wakeups();

	svg_close();
}

static int __cmd_timechart(const char *output_name)
{
	struct perf_tool perf_timechart = {
		.comm = process_comm_event,
		.fork = process_fork_event,
		.exit = process_exit_event,
		.sample = process_sample_event,
		.ordered_samples = true,
	};
	struct perf_session *session = perf_session__new(input_name, O_RDONLY,
							 0, false, &perf_timechart);
	int ret = -EINVAL;

	if (session == NULL)
		return -ENOMEM;

	if (!perf_session__has_traces(session, "timechart record"))
		goto out_delete;

	ret = perf_session__process_events(session, &perf_timechart);
	if (ret)
		goto out_delete;

	end_sample_processing();

	sort_pids();

	write_svg_file(output_name);

	pr_info("Written %2.1f seconds of trace to %s.\n",
		(last_time - first_time) / 1000000000.0, output_name);
out_delete:
	perf_session__delete(session);
	return ret;
}
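
/*
 * Added note: "perf timechart record" does no processing of its own; it
 * simply re-invokes the regular "perf record" builtin with a fixed set of
 * tracepoints plus any extra arguments the user passed. On recent kernels
 * the constructed command line is effectively:
 *
 *   perf record -a -R -c 1 \
 *	-e power:cpu_frequency -e power:cpu_idle \
 *	-e sched:sched_wakeup -e sched:sched_switch <extra args>
 *
 * On older kernels that still expose power:power_start and friends, the
 * legacy power:* event names are used instead (see record_old_args below).
 */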
static int __cmd_record(int argc, const char **argv)
{
#ifdef SUPPORT_OLD_POWER_EVENTS
	const char * const record_old_args[] = {
		"record", "-a", "-R", "-c", "1",
		"-e", "power:power_start",
		"-e", "power:power_end",
		"-e", "power:power_frequency",
		"-e", "sched:sched_wakeup",
		"-e", "sched:sched_switch",
	};
#endif
	const char * const record_new_args[] = {
		"record", "-a", "-R", "-c", "1",
		"-e", "power:cpu_frequency",
		"-e", "power:cpu_idle",
		"-e", "sched:sched_wakeup",
		"-e", "sched:sched_switch",
	};
	unsigned int rec_argc, i, j;
	const char **rec_argv;
	const char * const *record_args = record_new_args;
	unsigned int record_elems = ARRAY_SIZE(record_new_args);

#ifdef SUPPORT_OLD_POWER_EVENTS
	if (!is_valid_tracepoint("power:cpu_idle") &&
	    is_valid_tracepoint("power:power_start")) {
		use_old_power_events = 1;
		record_args = record_old_args;
		record_elems = ARRAY_SIZE(record_old_args);
	}
#endif

	rec_argc = record_elems + argc - 1;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	if (rec_argv == NULL)
		return -ENOMEM;

	for (i = 0; i < record_elems; i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	return cmd_record(i, rec_argv, NULL);
}
static int
parse_process(const struct option *opt __maybe_unused, const char *arg,
	      int __maybe_unused unset)
{
	if (arg)
		add_process_filter(arg);
	return 0;
}

int cmd_timechart(int argc, const char **argv,
		  const char *prefix __maybe_unused)
{
	const char *output_name = "output.svg";
	const struct option options[] = {
	OPT_STRING('i', "input", &input_name, "file", "input file name"),
	OPT_STRING('o', "output", &output_name, "file", "output file name"),
	OPT_INTEGER('w', "width", &svg_page_width, "page width"),
	OPT_BOOLEAN('P', "power-only", &power_only, "output power data only"),
	OPT_CALLBACK('p', "process", NULL, "process",
		     "process selector. Pass a pid or process name.",
		     parse_process),
	OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
		   "Look for files with symbols relative to this directory"),
	OPT_END()
	};
	const char * const timechart_usage[] = {
		"perf timechart [<options>] {record}",
		NULL
	};

	argc = parse_options(argc, argv, options, timechart_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);

	symbol__init();

	if (argc && !strncmp(argv[0], "rec", 3))
		return __cmd_record(argc, argv);
	else if (argc)
		usage_with_options(timechart_usage, options);

	setup_pager();

	return __cmd_timechart(output_name);
}
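
/*
 * Added usage sketch (illustrative only; the option table in cmd_timechart()
 * above is authoritative):
 *
 *   perf timechart record                     # system-wide trace until Ctrl-C
 *   perf timechart -i perf.data -o out.svg    # render the trace as an SVG
 *   perf timechart -p <pid-or-comm> ...       # limit the chart to one task
 *
 * The first command writes perf.data via "perf record"; the second renders
 * it, and -p restricts the drawn tasks using the process_filter list above.
 */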