builtin-timechart.c
  1. /*
  2. * builtin-timechart.c - make an svg timechart of system activity
  3. *
  4. * (C) Copyright 2009 Intel Corporation
  5. *
  6. * Authors:
  7. * Arjan van de Ven <arjan@linux.intel.com>
  8. *
  9. * This program is free software; you can redistribute it and/or
  10. * modify it under the terms of the GNU General Public License
  11. * as published by the Free Software Foundation; version 2
  12. * of the License.
  13. */
  14. #include <traceevent/event-parse.h>
  15. #include "builtin.h"
  16. #include "util/util.h"
  17. #include "util/color.h"
  18. #include <linux/list.h>
  19. #include "util/cache.h"
  20. #include "util/evlist.h"
  21. #include "util/evsel.h"
  22. #include <linux/rbtree.h>
  23. #include <linux/time64.h>
  24. #include "util/symbol.h"
  25. #include "util/callchain.h"
  26. #include "util/strlist.h"
  27. #include "perf.h"
  28. #include "util/header.h"
  29. #include <subcmd/parse-options.h>
  30. #include "util/parse-events.h"
  31. #include "util/event.h"
  32. #include "util/session.h"
  33. #include "util/svghelper.h"
  34. #include "util/tool.h"
  35. #include "util/data.h"
  36. #include "util/debug.h"
  37. #define SUPPORT_OLD_POWER_EVENTS 1
  38. #define PWR_EVENT_EXIT -1
  39. struct per_pid;
  40. struct power_event;
  41. struct wake_event;
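/*
 * Top-level state for one timechart run: the perf_tool callbacks, the
 * linked lists of per-pid data, power and wakeup events gathered while
 * processing the trace, and the command-line display knobs
 * (power_only, tasks_only, io_only, min_time, merge_dist, ...).
 */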
  42. struct timechart {
  43. struct perf_tool tool;
  44. struct per_pid *all_data;
  45. struct power_event *power_events;
  46. struct wake_event *wake_events;
  47. int proc_num;
  48. unsigned int numcpus;
  49. u64 min_freq, /* Lowest CPU frequency seen */
  50. max_freq, /* Highest CPU frequency seen */
  51. turbo_frequency,
  52. first_time, last_time;
  53. bool power_only,
  54. tasks_only,
  55. with_backtrace,
  56. topology;
  57. bool force;
  58. /* IO related settings */
  59. bool io_only,
  60. skip_eagain;
  61. u64 io_events;
  62. u64 min_time,
  63. merge_dist;
  64. };
  65. struct per_pidcomm;
  66. struct cpu_sample;
  67. struct io_sample;
  68. /*
  69. * Datastructure layout:
  70. * We keep a list of "pid"s, matching the kernel's notion of a task struct.
  71. * Each "pid" entry has a list of "comm"s.
  72. * This is because we want to track different programs differently, while
  73. * exec reuses the original pid (by design).
  74. * Each comm has a list of samples that will be used to draw the
  75. * final graph.
  76. */
  77. struct per_pid {
  78. struct per_pid *next;
  79. int pid;
  80. int ppid;
  81. u64 start_time;
  82. u64 end_time;
  83. u64 total_time;
  84. u64 total_bytes;
  85. int display;
  86. struct per_pidcomm *all;
  87. struct per_pidcomm *current;
  88. };
  89. struct per_pidcomm {
  90. struct per_pidcomm *next;
  91. u64 start_time;
  92. u64 end_time;
  93. u64 total_time;
  94. u64 max_bytes;
  95. u64 total_bytes;
  96. int Y;
  97. int display;
  98. long state;
  99. u64 state_since;
  100. char *comm;
  101. struct cpu_sample *samples;
  102. struct io_sample *io_samples;
  103. };
  104. struct sample_wrapper {
  105. struct sample_wrapper *next;
  106. u64 timestamp;
  107. unsigned char data[0];
  108. };
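/*
 * Per-task scheduler states stored in cpu_sample::type: RUNNING means the
 * task was on a CPU, WAITING means it was runnable but not running, and
 * BLOCKED means it went to sleep; see sched_wakeup() and sched_switch()
 * below for how the transitions are derived.
 */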
  109. #define TYPE_NONE 0
  110. #define TYPE_RUNNING 1
  111. #define TYPE_WAITING 2
  112. #define TYPE_BLOCKED 3
  113. struct cpu_sample {
  114. struct cpu_sample *next;
  115. u64 start_time;
  116. u64 end_time;
  117. int type;
  118. int cpu;
  119. const char *backtrace;
  120. };
  121. enum {
  122. IOTYPE_READ,
  123. IOTYPE_WRITE,
  124. IOTYPE_SYNC,
  125. IOTYPE_TX,
  126. IOTYPE_RX,
  127. IOTYPE_POLL,
  128. };
  129. struct io_sample {
  130. struct io_sample *next;
  131. u64 start_time;
  132. u64 end_time;
  133. u64 bytes;
  134. int type;
  135. int fd;
  136. int err;
  137. int merges;
  138. };
  139. #define CSTATE 1
  140. #define PSTATE 2
  141. struct power_event {
  142. struct power_event *next;
  143. int type;
  144. int state;
  145. u64 start_time;
  146. u64 end_time;
  147. int cpu;
  148. };
  149. struct wake_event {
  150. struct wake_event *next;
  151. int waker;
  152. int wakee;
  153. u64 time;
  154. const char *backtrace;
  155. };
  156. struct process_filter {
  157. char *name;
  158. int pid;
  159. struct process_filter *next;
  160. };
  161. static struct process_filter *process_filter;
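/*
 * Linear search of the singly linked pid list; a new zero-initialized
 * entry is created and pushed to the front when the pid is not found.
 */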
  162. static struct per_pid *find_create_pid(struct timechart *tchart, int pid)
  163. {
  164. struct per_pid *cursor = tchart->all_data;
  165. while (cursor) {
  166. if (cursor->pid == pid)
  167. return cursor;
  168. cursor = cursor->next;
  169. }
  170. cursor = zalloc(sizeof(*cursor));
  171. assert(cursor != NULL);
  172. cursor->pid = pid;
  173. cursor->next = tchart->all_data;
  174. tchart->all_data = cursor;
  175. return cursor;
  176. }
  177. static void pid_set_comm(struct timechart *tchart, int pid, char *comm)
  178. {
  179. struct per_pid *p;
  180. struct per_pidcomm *c;
  181. p = find_create_pid(tchart, pid);
  182. c = p->all;
  183. while (c) {
  184. if (c->comm && strcmp(c->comm, comm) == 0) {
  185. p->current = c;
  186. return;
  187. }
  188. if (!c->comm) {
  189. c->comm = strdup(comm);
  190. p->current = c;
  191. return;
  192. }
  193. c = c->next;
  194. }
  195. c = zalloc(sizeof(*c));
  196. assert(c != NULL);
  197. c->comm = strdup(comm);
  198. p->current = c;
  199. c->next = p->all;
  200. p->all = c;
  201. }
  202. static void pid_fork(struct timechart *tchart, int pid, int ppid, u64 timestamp)
  203. {
  204. struct per_pid *p, *pp;
  205. p = find_create_pid(tchart, pid);
  206. pp = find_create_pid(tchart, ppid);
  207. p->ppid = ppid;
  208. if (pp->current && pp->current->comm && !p->current)
  209. pid_set_comm(tchart, pid, pp->current->comm);
  210. p->start_time = timestamp;
  211. if (p->current && !p->current->start_time) {
  212. p->current->start_time = timestamp;
  213. p->current->state_since = timestamp;
  214. }
  215. }
  216. static void pid_exit(struct timechart *tchart, int pid, u64 timestamp)
  217. {
  218. struct per_pid *p;
  219. p = find_create_pid(tchart, pid);
  220. p->end_time = timestamp;
  221. if (p->current)
  222. p->current->end_time = timestamp;
  223. }
  224. static void pid_put_sample(struct timechart *tchart, int pid, int type,
  225. unsigned int cpu, u64 start, u64 end,
  226. const char *backtrace)
  227. {
  228. struct per_pid *p;
  229. struct per_pidcomm *c;
  230. struct cpu_sample *sample;
  231. p = find_create_pid(tchart, pid);
  232. c = p->current;
  233. if (!c) {
  234. c = zalloc(sizeof(*c));
  235. assert(c != NULL);
  236. p->current = c;
  237. c->next = p->all;
  238. p->all = c;
  239. }
  240. sample = zalloc(sizeof(*sample));
  241. assert(sample != NULL);
  242. sample->start_time = start;
  243. sample->end_time = end;
  244. sample->type = type;
  245. sample->next = c->samples;
  246. sample->cpu = cpu;
  247. sample->backtrace = backtrace;
  248. c->samples = sample;
  249. if (sample->type == TYPE_RUNNING && end > start && start > 0) {
  250. c->total_time += (end-start);
  251. p->total_time += (end-start);
  252. }
  253. if (c->start_time == 0 || c->start_time > start)
  254. c->start_time = start;
  255. if (p->start_time == 0 || p->start_time > start)
  256. p->start_time = start;
  257. }
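/*
 * Per-CPU bookkeeping for the C-state/P-state that is currently open,
 * i.e. started but not yet closed by an end/change event.
 */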
  258. #define MAX_CPUS 4096
  259. static u64 cpus_cstate_start_times[MAX_CPUS];
  260. static int cpus_cstate_state[MAX_CPUS];
  261. static u64 cpus_pstate_start_times[MAX_CPUS];
  262. static u64 cpus_pstate_state[MAX_CPUS];
  263. static int process_comm_event(struct perf_tool *tool,
  264. union perf_event *event,
  265. struct perf_sample *sample __maybe_unused,
  266. struct machine *machine __maybe_unused)
  267. {
  268. struct timechart *tchart = container_of(tool, struct timechart, tool);
  269. pid_set_comm(tchart, event->comm.tid, event->comm.comm);
  270. return 0;
  271. }
  272. static int process_fork_event(struct perf_tool *tool,
  273. union perf_event *event,
  274. struct perf_sample *sample __maybe_unused,
  275. struct machine *machine __maybe_unused)
  276. {
  277. struct timechart *tchart = container_of(tool, struct timechart, tool);
  278. pid_fork(tchart, event->fork.pid, event->fork.ppid, event->fork.time);
  279. return 0;
  280. }
  281. static int process_exit_event(struct perf_tool *tool,
  282. union perf_event *event,
  283. struct perf_sample *sample __maybe_unused,
  284. struct machine *machine __maybe_unused)
  285. {
  286. struct timechart *tchart = container_of(tool, struct timechart, tool);
  287. pid_exit(tchart, event->fork.pid, event->fork.time);
  288. return 0;
  289. }
  290. #ifdef SUPPORT_OLD_POWER_EVENTS
  291. static int use_old_power_events;
  292. #endif
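/*
 * c_state_start() only remembers the timestamp and state; the matching
 * c_state_end() turns that interval into a CSTATE power_event on the
 * tchart list.
 */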
  293. static void c_state_start(int cpu, u64 timestamp, int state)
  294. {
  295. cpus_cstate_start_times[cpu] = timestamp;
  296. cpus_cstate_state[cpu] = state;
  297. }
  298. static void c_state_end(struct timechart *tchart, int cpu, u64 timestamp)
  299. {
  300. struct power_event *pwr = zalloc(sizeof(*pwr));
  301. if (!pwr)
  302. return;
  303. pwr->state = cpus_cstate_state[cpu];
  304. pwr->start_time = cpus_cstate_start_times[cpu];
  305. pwr->end_time = timestamp;
  306. pwr->cpu = cpu;
  307. pwr->type = CSTATE;
  308. pwr->next = tchart->power_events;
  309. tchart->power_events = pwr;
  310. }
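/*
 * Each frequency-change event closes the previous P-state interval and
 * opens a new one, while tracking the minimum and maximum frequency seen.
 * If a frequency exactly 1000 kHz below the maximum shows up, the maximum
 * is taken to be the turbo frequency, apparently following the cpufreq
 * convention of advertising turbo as "max non-turbo + 1000 kHz".
 */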
  311. static void p_state_change(struct timechart *tchart, int cpu, u64 timestamp, u64 new_freq)
  312. {
  313. struct power_event *pwr;
  314. if (new_freq > 8000000) /* detect invalid data */
  315. return;
  316. pwr = zalloc(sizeof(*pwr));
  317. if (!pwr)
  318. return;
  319. pwr->state = cpus_pstate_state[cpu];
  320. pwr->start_time = cpus_pstate_start_times[cpu];
  321. pwr->end_time = timestamp;
  322. pwr->cpu = cpu;
  323. pwr->type = PSTATE;
  324. pwr->next = tchart->power_events;
  325. if (!pwr->start_time)
  326. pwr->start_time = tchart->first_time;
  327. tchart->power_events = pwr;
  328. cpus_pstate_state[cpu] = new_freq;
  329. cpus_pstate_start_times[cpu] = timestamp;
  330. if ((u64)new_freq > tchart->max_freq)
  331. tchart->max_freq = new_freq;
  332. if (new_freq < tchart->min_freq || tchart->min_freq == 0)
  333. tchart->min_freq = new_freq;
  334. if (new_freq == tchart->max_freq - 1000)
  335. tchart->turbo_frequency = tchart->max_freq;
  336. }
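/*
 * Record a wakeup event. Wakeups issued from hard/soft irq context get
 * waker = -1 so draw_wakeups() renders them as interrupts rather than
 * task-to-task wake lines; a blocked wakee transitions to TYPE_WAITING.
 */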
  337. static void sched_wakeup(struct timechart *tchart, int cpu, u64 timestamp,
  338. int waker, int wakee, u8 flags, const char *backtrace)
  339. {
  340. struct per_pid *p;
  341. struct wake_event *we = zalloc(sizeof(*we));
  342. if (!we)
  343. return;
  344. we->time = timestamp;
  345. we->waker = waker;
  346. we->backtrace = backtrace;
  347. if ((flags & TRACE_FLAG_HARDIRQ) || (flags & TRACE_FLAG_SOFTIRQ))
  348. we->waker = -1;
  349. we->wakee = wakee;
  350. we->next = tchart->wake_events;
  351. tchart->wake_events = we;
  352. p = find_create_pid(tchart, we->wakee);
  353. if (p && p->current && p->current->state == TYPE_NONE) {
  354. p->current->state_since = timestamp;
  355. p->current->state = TYPE_WAITING;
  356. }
  357. if (p && p->current && p->current->state == TYPE_BLOCKED) {
  358. pid_put_sample(tchart, p->pid, p->current->state, cpu,
  359. p->current->state_since, timestamp, NULL);
  360. p->current->state_since = timestamp;
  361. p->current->state = TYPE_WAITING;
  362. }
  363. }
  364. static void sched_switch(struct timechart *tchart, int cpu, u64 timestamp,
  365. int prev_pid, int next_pid, u64 prev_state,
  366. const char *backtrace)
  367. {
  368. struct per_pid *p = NULL, *prev_p;
  369. prev_p = find_create_pid(tchart, prev_pid);
  370. p = find_create_pid(tchart, next_pid);
  371. if (prev_p->current && prev_p->current->state != TYPE_NONE)
  372. pid_put_sample(tchart, prev_pid, TYPE_RUNNING, cpu,
  373. prev_p->current->state_since, timestamp,
  374. backtrace);
  375. if (p && p->current) {
  376. if (p->current->state != TYPE_NONE)
  377. pid_put_sample(tchart, next_pid, p->current->state, cpu,
  378. p->current->state_since, timestamp,
  379. backtrace);
  380. p->current->state_since = timestamp;
  381. p->current->state = TYPE_RUNNING;
  382. }
  383. if (prev_p->current) {
  384. prev_p->current->state = TYPE_NONE;
  385. prev_p->current->state_since = timestamp;
  386. if (prev_state & 2)
  387. prev_p->current->state = TYPE_BLOCKED;
  388. if (prev_state == 0)
  389. prev_p->current->state = TYPE_WAITING;
  390. }
  391. }
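/*
 * Resolve the sample's callchain into a newline-separated list of
 * "..... <address> <symbol>" lines built with open_memstream(). The
 * resulting string is attached to cpu samples and wakeup events and later
 * passed on to the svg_* helpers.
 */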
  392. static const char *cat_backtrace(union perf_event *event,
  393. struct perf_sample *sample,
  394. struct machine *machine)
  395. {
  396. struct addr_location al;
  397. unsigned int i;
  398. char *p = NULL;
  399. size_t p_len;
  400. u8 cpumode = PERF_RECORD_MISC_USER;
  401. struct addr_location tal;
  402. struct ip_callchain *chain = sample->callchain;
  403. FILE *f = open_memstream(&p, &p_len);
  404. if (!f) {
  405. perror("open_memstream error");
  406. return NULL;
  407. }
  408. if (!chain)
  409. goto exit;
  410. if (machine__resolve(machine, &al, sample) < 0) {
  411. fprintf(stderr, "problem processing %d event, skipping it.\n",
  412. event->header.type);
  413. goto exit;
  414. }
  415. for (i = 0; i < chain->nr; i++) {
  416. u64 ip;
  417. if (callchain_param.order == ORDER_CALLEE)
  418. ip = chain->ips[i];
  419. else
  420. ip = chain->ips[chain->nr - i - 1];
  421. if (ip >= PERF_CONTEXT_MAX) {
  422. switch (ip) {
  423. case PERF_CONTEXT_HV:
  424. cpumode = PERF_RECORD_MISC_HYPERVISOR;
  425. break;
  426. case PERF_CONTEXT_KERNEL:
  427. cpumode = PERF_RECORD_MISC_KERNEL;
  428. break;
  429. case PERF_CONTEXT_USER:
  430. cpumode = PERF_RECORD_MISC_USER;
  431. break;
  432. default:
  433. pr_debug("invalid callchain context: "
  434. "%"PRId64"\n", (s64) ip);
  435. /*
  436. * It seems the callchain is corrupted.
  437. * Discard all.
  438. */
  439. zfree(&p);
  440. goto exit_put;
  441. }
  442. continue;
  443. }
  444. tal.filtered = 0;
  445. thread__find_addr_location(al.thread, cpumode,
  446. MAP__FUNCTION, ip, &tal);
  447. if (tal.sym)
  448. fprintf(f, "..... %016" PRIx64 " %s\n", ip,
  449. tal.sym->name);
  450. else
  451. fprintf(f, "..... %016" PRIx64 "\n", ip);
  452. }
  453. exit_put:
  454. addr_location__put(&al);
  455. exit:
  456. fclose(f);
  457. return p;
  458. }
  459. typedef int (*tracepoint_handler)(struct timechart *tchart,
  460. struct perf_evsel *evsel,
  461. struct perf_sample *sample,
  462. const char *backtrace);
  463. static int process_sample_event(struct perf_tool *tool,
  464. union perf_event *event,
  465. struct perf_sample *sample,
  466. struct perf_evsel *evsel,
  467. struct machine *machine)
  468. {
  469. struct timechart *tchart = container_of(tool, struct timechart, tool);
  470. if (evsel->attr.sample_type & PERF_SAMPLE_TIME) {
  471. if (!tchart->first_time || tchart->first_time > sample->time)
  472. tchart->first_time = sample->time;
  473. if (tchart->last_time < sample->time)
  474. tchart->last_time = sample->time;
  475. }
  476. if (evsel->handler != NULL) {
  477. tracepoint_handler f = evsel->handler;
  478. return f(tchart, evsel, sample,
  479. cat_backtrace(event, sample, machine));
  480. }
  481. return 0;
  482. }
  483. static int
  484. process_sample_cpu_idle(struct timechart *tchart __maybe_unused,
  485. struct perf_evsel *evsel,
  486. struct perf_sample *sample,
  487. const char *backtrace __maybe_unused)
  488. {
  489. u32 state = perf_evsel__intval(evsel, sample, "state");
  490. u32 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");
  491. if (state == (u32)PWR_EVENT_EXIT)
  492. c_state_end(tchart, cpu_id, sample->time);
  493. else
  494. c_state_start(cpu_id, sample->time, state);
  495. return 0;
  496. }
  497. static int
  498. process_sample_cpu_frequency(struct timechart *tchart,
  499. struct perf_evsel *evsel,
  500. struct perf_sample *sample,
  501. const char *backtrace __maybe_unused)
  502. {
  503. u32 state = perf_evsel__intval(evsel, sample, "state");
  504. u32 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");
  505. p_state_change(tchart, cpu_id, sample->time, state);
  506. return 0;
  507. }
  508. static int
  509. process_sample_sched_wakeup(struct timechart *tchart,
  510. struct perf_evsel *evsel,
  511. struct perf_sample *sample,
  512. const char *backtrace)
  513. {
  514. u8 flags = perf_evsel__intval(evsel, sample, "common_flags");
  515. int waker = perf_evsel__intval(evsel, sample, "common_pid");
  516. int wakee = perf_evsel__intval(evsel, sample, "pid");
  517. sched_wakeup(tchart, sample->cpu, sample->time, waker, wakee, flags, backtrace);
  518. return 0;
  519. }
  520. static int
  521. process_sample_sched_switch(struct timechart *tchart,
  522. struct perf_evsel *evsel,
  523. struct perf_sample *sample,
  524. const char *backtrace)
  525. {
  526. int prev_pid = perf_evsel__intval(evsel, sample, "prev_pid");
  527. int next_pid = perf_evsel__intval(evsel, sample, "next_pid");
  528. u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");
  529. sched_switch(tchart, sample->cpu, sample->time, prev_pid, next_pid,
  530. prev_state, backtrace);
  531. return 0;
  532. }
  533. #ifdef SUPPORT_OLD_POWER_EVENTS
  534. static int
  535. process_sample_power_start(struct timechart *tchart __maybe_unused,
  536. struct perf_evsel *evsel,
  537. struct perf_sample *sample,
  538. const char *backtrace __maybe_unused)
  539. {
  540. u64 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");
  541. u64 value = perf_evsel__intval(evsel, sample, "value");
  542. c_state_start(cpu_id, sample->time, value);
  543. return 0;
  544. }
  545. static int
  546. process_sample_power_end(struct timechart *tchart,
  547. struct perf_evsel *evsel __maybe_unused,
  548. struct perf_sample *sample,
  549. const char *backtrace __maybe_unused)
  550. {
  551. c_state_end(tchart, sample->cpu, sample->time);
  552. return 0;
  553. }
  554. static int
  555. process_sample_power_frequency(struct timechart *tchart,
  556. struct perf_evsel *evsel,
  557. struct perf_sample *sample,
  558. const char *backtrace __maybe_unused)
  559. {
  560. u64 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");
  561. u64 value = perf_evsel__intval(evsel, sample, "value");
  562. p_state_change(tchart, cpu_id, sample->time, value);
  563. return 0;
  564. }
  565. #endif /* SUPPORT_OLD_POWER_EVENTS */
  566. /*
  567. * After the last sample we need to wrap up the current C/P state
  568. * and close it out for each CPU.
  569. */
  570. static void end_sample_processing(struct timechart *tchart)
  571. {
  572. u64 cpu;
  573. struct power_event *pwr;
  574. for (cpu = 0; cpu <= tchart->numcpus; cpu++) {
  575. /* C state */
  576. #if 0
  577. pwr = zalloc(sizeof(*pwr));
  578. if (!pwr)
  579. return;
  580. pwr->state = cpus_cstate_state[cpu];
  581. pwr->start_time = cpus_cstate_start_times[cpu];
  582. pwr->end_time = tchart->last_time;
  583. pwr->cpu = cpu;
  584. pwr->type = CSTATE;
  585. pwr->next = tchart->power_events;
  586. tchart->power_events = pwr;
  587. #endif
  588. /* P state */
  589. pwr = zalloc(sizeof(*pwr));
  590. if (!pwr)
  591. return;
  592. pwr->state = cpus_pstate_state[cpu];
  593. pwr->start_time = cpus_pstate_start_times[cpu];
  594. pwr->end_time = tchart->last_time;
  595. pwr->cpu = cpu;
  596. pwr->type = PSTATE;
  597. pwr->next = tchart->power_events;
  598. if (!pwr->start_time)
  599. pwr->start_time = tchart->first_time;
  600. if (!pwr->state)
  601. pwr->state = tchart->min_freq;
  602. tchart->power_events = pwr;
  603. }
  604. }
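/*
 * I/O sample bookkeeping: pid_begin_io_sample() opens a sample on a
 * syscall-enter event and pid_end_io_sample() completes it on the matching
 * exit, stretching very short transfers to at least min_time and merging
 * neighbouring samples of the same type/fd/err that fall within merge_dist.
 */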
  605. static int pid_begin_io_sample(struct timechart *tchart, int pid, int type,
  606. u64 start, int fd)
  607. {
  608. struct per_pid *p = find_create_pid(tchart, pid);
  609. struct per_pidcomm *c = p->current;
  610. struct io_sample *sample;
  611. struct io_sample *prev;
  612. if (!c) {
  613. c = zalloc(sizeof(*c));
  614. if (!c)
  615. return -ENOMEM;
  616. p->current = c;
  617. c->next = p->all;
  618. p->all = c;
  619. }
  620. prev = c->io_samples;
  621. if (prev && prev->start_time && !prev->end_time) {
  622. pr_warning("Skip invalid start event: "
  623. "previous event already started!\n");
  624. /* remove the previous event that was started; we are
  625. * not sure we will ever get an end event for it */
  626. c->io_samples = prev->next;
  627. free(prev);
  628. return 0;
  629. }
  630. sample = zalloc(sizeof(*sample));
  631. if (!sample)
  632. return -ENOMEM;
  633. sample->start_time = start;
  634. sample->type = type;
  635. sample->fd = fd;
  636. sample->next = c->io_samples;
  637. c->io_samples = sample;
  638. if (c->start_time == 0 || c->start_time > start)
  639. c->start_time = start;
  640. return 0;
  641. }
  642. static int pid_end_io_sample(struct timechart *tchart, int pid, int type,
  643. u64 end, long ret)
  644. {
  645. struct per_pid *p = find_create_pid(tchart, pid);
  646. struct per_pidcomm *c = p->current;
  647. struct io_sample *sample, *prev;
  648. if (!c) {
  649. pr_warning("Invalid pidcomm!\n");
  650. return -1;
  651. }
  652. sample = c->io_samples;
  653. if (!sample) /* skip partially captured events */
  654. return 0;
  655. if (sample->end_time) {
  656. pr_warning("Skip invalid end event: "
  657. "previous event already ended!\n");
  658. return 0;
  659. }
  660. if (sample->type != type) {
  661. pr_warning("Skip invalid end event: invalid event type!\n");
  662. return 0;
  663. }
  664. sample->end_time = end;
  665. prev = sample->next;
  666. /* we want to be able to see small and fast transfers, so make them
  667. * at least min_time long, but don't overlap them */
  668. if (sample->end_time - sample->start_time < tchart->min_time)
  669. sample->end_time = sample->start_time + tchart->min_time;
  670. if (prev && sample->start_time < prev->end_time) {
  671. if (prev->err) /* try to make errors more visible */
  672. sample->start_time = prev->end_time;
  673. else
  674. prev->end_time = sample->start_time;
  675. }
  676. if (ret < 0) {
  677. sample->err = ret;
  678. } else if (type == IOTYPE_READ || type == IOTYPE_WRITE ||
  679. type == IOTYPE_TX || type == IOTYPE_RX) {
  680. if ((u64)ret > c->max_bytes)
  681. c->max_bytes = ret;
  682. c->total_bytes += ret;
  683. p->total_bytes += ret;
  684. sample->bytes = ret;
  685. }
  686. /* merge the two requests to keep the svg smaller and easier to render */
  687. if (prev &&
  688. prev->type == sample->type &&
  689. prev->err == sample->err &&
  690. prev->fd == sample->fd &&
  691. prev->end_time + tchart->merge_dist >= sample->start_time) {
  692. sample->bytes += prev->bytes;
  693. sample->merges += prev->merges + 1;
  694. sample->start_time = prev->start_time;
  695. sample->next = prev->next;
  696. free(prev);
  697. if (!sample->err && sample->bytes > c->max_bytes)
  698. c->max_bytes = sample->bytes;
  699. }
  700. tchart->io_events++;
  701. return 0;
  702. }
  703. static int
  704. process_enter_read(struct timechart *tchart,
  705. struct perf_evsel *evsel,
  706. struct perf_sample *sample)
  707. {
  708. long fd = perf_evsel__intval(evsel, sample, "fd");
  709. return pid_begin_io_sample(tchart, sample->tid, IOTYPE_READ,
  710. sample->time, fd);
  711. }
  712. static int
  713. process_exit_read(struct timechart *tchart,
  714. struct perf_evsel *evsel,
  715. struct perf_sample *sample)
  716. {
  717. long ret = perf_evsel__intval(evsel, sample, "ret");
  718. return pid_end_io_sample(tchart, sample->tid, IOTYPE_READ,
  719. sample->time, ret);
  720. }
  721. static int
  722. process_enter_write(struct timechart *tchart,
  723. struct perf_evsel *evsel,
  724. struct perf_sample *sample)
  725. {
  726. long fd = perf_evsel__intval(evsel, sample, "fd");
  727. return pid_begin_io_sample(tchart, sample->tid, IOTYPE_WRITE,
  728. sample->time, fd);
  729. }
  730. static int
  731. process_exit_write(struct timechart *tchart,
  732. struct perf_evsel *evsel,
  733. struct perf_sample *sample)
  734. {
  735. long ret = perf_evsel__intval(evsel, sample, "ret");
  736. return pid_end_io_sample(tchart, sample->tid, IOTYPE_WRITE,
  737. sample->time, ret);
  738. }
  739. static int
  740. process_enter_sync(struct timechart *tchart,
  741. struct perf_evsel *evsel,
  742. struct perf_sample *sample)
  743. {
  744. long fd = perf_evsel__intval(evsel, sample, "fd");
  745. return pid_begin_io_sample(tchart, sample->tid, IOTYPE_SYNC,
  746. sample->time, fd);
  747. }
  748. static int
  749. process_exit_sync(struct timechart *tchart,
  750. struct perf_evsel *evsel,
  751. struct perf_sample *sample)
  752. {
  753. long ret = perf_evsel__intval(evsel, sample, "ret");
  754. return pid_end_io_sample(tchart, sample->tid, IOTYPE_SYNC,
  755. sample->time, ret);
  756. }
  757. static int
  758. process_enter_tx(struct timechart *tchart,
  759. struct perf_evsel *evsel,
  760. struct perf_sample *sample)
  761. {
  762. long fd = perf_evsel__intval(evsel, sample, "fd");
  763. return pid_begin_io_sample(tchart, sample->tid, IOTYPE_TX,
  764. sample->time, fd);
  765. }
  766. static int
  767. process_exit_tx(struct timechart *tchart,
  768. struct perf_evsel *evsel,
  769. struct perf_sample *sample)
  770. {
  771. long ret = perf_evsel__intval(evsel, sample, "ret");
  772. return pid_end_io_sample(tchart, sample->tid, IOTYPE_TX,
  773. sample->time, ret);
  774. }
  775. static int
  776. process_enter_rx(struct timechart *tchart,
  777. struct perf_evsel *evsel,
  778. struct perf_sample *sample)
  779. {
  780. long fd = perf_evsel__intval(evsel, sample, "fd");
  781. return pid_begin_io_sample(tchart, sample->tid, IOTYPE_RX,
  782. sample->time, fd);
  783. }
  784. static int
  785. process_exit_rx(struct timechart *tchart,
  786. struct perf_evsel *evsel,
  787. struct perf_sample *sample)
  788. {
  789. long ret = perf_evsel__intval(evsel, sample, "ret");
  790. return pid_end_io_sample(tchart, sample->tid, IOTYPE_RX,
  791. sample->time, ret);
  792. }
  793. static int
  794. process_enter_poll(struct timechart *tchart,
  795. struct perf_evsel *evsel,
  796. struct perf_sample *sample)
  797. {
  798. long fd = perf_evsel__intval(evsel, sample, "fd");
  799. return pid_begin_io_sample(tchart, sample->tid, IOTYPE_POLL,
  800. sample->time, fd);
  801. }
  802. static int
  803. process_exit_poll(struct timechart *tchart,
  804. struct perf_evsel *evsel,
  805. struct perf_sample *sample)
  806. {
  807. long ret = perf_evsel__intval(evsel, sample, "ret");
  808. return pid_end_io_sample(tchart, sample->tid, IOTYPE_POLL,
  809. sample->time, ret);
  810. }
  811. /*
  812. * Sort the pid datastructure
  813. */
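/* Simple insertion sort on the singly linked list, keyed by (ppid, pid). */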
  814. static void sort_pids(struct timechart *tchart)
  815. {
  816. struct per_pid *new_list, *p, *cursor, *prev;
  817. /* sort by ppid first, then by pid, lowest to highest */
  818. new_list = NULL;
  819. while (tchart->all_data) {
  820. p = tchart->all_data;
  821. tchart->all_data = p->next;
  822. p->next = NULL;
  823. if (new_list == NULL) {
  824. new_list = p;
  825. p->next = NULL;
  826. continue;
  827. }
  828. prev = NULL;
  829. cursor = new_list;
  830. while (cursor) {
  831. if (cursor->ppid > p->ppid ||
  832. (cursor->ppid == p->ppid && cursor->pid > p->pid)) {
  833. /* must insert before */
  834. if (prev) {
  835. p->next = prev->next;
  836. prev->next = p;
  837. cursor = NULL;
  838. continue;
  839. } else {
  840. p->next = new_list;
  841. new_list = p;
  842. cursor = NULL;
  843. continue;
  844. }
  845. }
  846. prev = cursor;
  847. cursor = cursor->next;
  848. if (!cursor)
  849. prev->next = p;
  850. }
  851. }
  852. tchart->all_data = new_list;
  853. }
  854. static void draw_c_p_states(struct timechart *tchart)
  855. {
  856. struct power_event *pwr;
  857. pwr = tchart->power_events;
  858. /*
  859. * two-pass drawing so that the P state bars end up on top of the C state blocks
  860. */
  861. while (pwr) {
  862. if (pwr->type == CSTATE)
  863. svg_cstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
  864. pwr = pwr->next;
  865. }
  866. pwr = tchart->power_events;
  867. while (pwr) {
  868. if (pwr->type == PSTATE) {
  869. if (!pwr->state)
  870. pwr->state = tchart->min_freq;
  871. svg_pstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
  872. }
  873. pwr = pwr->next;
  874. }
  875. }
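/*
 * The waker/wakee columns are looked up in two passes per pid: first
 * prefer a comm whose lifetime covers the wakeup time, then fall back to
 * any comm of that pid; "[pid]" placeholders are used if nothing matches.
 */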
  876. static void draw_wakeups(struct timechart *tchart)
  877. {
  878. struct wake_event *we;
  879. struct per_pid *p;
  880. struct per_pidcomm *c;
  881. we = tchart->wake_events;
  882. while (we) {
  883. int from = 0, to = 0;
  884. char *task_from = NULL, *task_to = NULL;
  885. /* locate the column of the waker and wakee */
  886. p = tchart->all_data;
  887. while (p) {
  888. if (p->pid == we->waker || p->pid == we->wakee) {
  889. c = p->all;
  890. while (c) {
  891. if (c->Y && c->start_time <= we->time && c->end_time >= we->time) {
  892. if (p->pid == we->waker && !from) {
  893. from = c->Y;
  894. task_from = strdup(c->comm);
  895. }
  896. if (p->pid == we->wakee && !to) {
  897. to = c->Y;
  898. task_to = strdup(c->comm);
  899. }
  900. }
  901. c = c->next;
  902. }
  903. c = p->all;
  904. while (c) {
  905. if (p->pid == we->waker && !from) {
  906. from = c->Y;
  907. task_from = strdup(c->comm);
  908. }
  909. if (p->pid == we->wakee && !to) {
  910. to = c->Y;
  911. task_to = strdup(c->comm);
  912. }
  913. c = c->next;
  914. }
  915. }
  916. p = p->next;
  917. }
  918. if (!task_from) {
  919. task_from = malloc(40);
  920. sprintf(task_from, "[%i]", we->waker);
  921. }
  922. if (!task_to) {
  923. task_to = malloc(40);
  924. sprintf(task_to, "[%i]", we->wakee);
  925. }
  926. if (we->waker == -1)
  927. svg_interrupt(we->time, to, we->backtrace);
  928. else if (from && to && abs(from - to) == 1)
  929. svg_wakeline(we->time, from, to, we->backtrace);
  930. else
  931. svg_partial_wakeline(we->time, from, task_from, to,
  932. task_to, we->backtrace);
  933. we = we->next;
  934. free(task_from);
  935. free(task_to);
  936. }
  937. }
  938. static void draw_cpu_usage(struct timechart *tchart)
  939. {
  940. struct per_pid *p;
  941. struct per_pidcomm *c;
  942. struct cpu_sample *sample;
  943. p = tchart->all_data;
  944. while (p) {
  945. c = p->all;
  946. while (c) {
  947. sample = c->samples;
  948. while (sample) {
  949. if (sample->type == TYPE_RUNNING) {
  950. svg_process(sample->cpu,
  951. sample->start_time,
  952. sample->end_time,
  953. p->pid,
  954. c->comm,
  955. sample->backtrace);
  956. }
  957. sample = sample->next;
  958. }
  959. c = c->next;
  960. }
  961. p = p->next;
  962. }
  963. }
  964. static void draw_io_bars(struct timechart *tchart)
  965. {
  966. const char *suf;
  967. double bytes;
  968. char comm[256];
  969. struct per_pid *p;
  970. struct per_pidcomm *c;
  971. struct io_sample *sample;
  972. int Y = 1;
  973. p = tchart->all_data;
  974. while (p) {
  975. c = p->all;
  976. while (c) {
  977. if (!c->display) {
  978. c->Y = 0;
  979. c = c->next;
  980. continue;
  981. }
  982. svg_box(Y, c->start_time, c->end_time, "process3");
  983. sample = c->io_samples;
  984. for (sample = c->io_samples; sample; sample = sample->next) {
  985. double h = (double)sample->bytes / c->max_bytes;
  986. if (tchart->skip_eagain &&
  987. sample->err == -EAGAIN)
  988. continue;
  989. if (sample->err)
  990. h = 1;
  991. if (sample->type == IOTYPE_SYNC)
  992. svg_fbox(Y,
  993. sample->start_time,
  994. sample->end_time,
  995. 1,
  996. sample->err ? "error" : "sync",
  997. sample->fd,
  998. sample->err,
  999. sample->merges);
  1000. else if (sample->type == IOTYPE_POLL)
  1001. svg_fbox(Y,
  1002. sample->start_time,
  1003. sample->end_time,
  1004. 1,
  1005. sample->err ? "error" : "poll",
  1006. sample->fd,
  1007. sample->err,
  1008. sample->merges);
  1009. else if (sample->type == IOTYPE_READ)
  1010. svg_ubox(Y,
  1011. sample->start_time,
  1012. sample->end_time,
  1013. h,
  1014. sample->err ? "error" : "disk",
  1015. sample->fd,
  1016. sample->err,
  1017. sample->merges);
  1018. else if (sample->type == IOTYPE_WRITE)
  1019. svg_lbox(Y,
  1020. sample->start_time,
  1021. sample->end_time,
  1022. h,
  1023. sample->err ? "error" : "disk",
  1024. sample->fd,
  1025. sample->err,
  1026. sample->merges);
  1027. else if (sample->type == IOTYPE_RX)
  1028. svg_ubox(Y,
  1029. sample->start_time,
  1030. sample->end_time,
  1031. h,
  1032. sample->err ? "error" : "net",
  1033. sample->fd,
  1034. sample->err,
  1035. sample->merges);
  1036. else if (sample->type == IOTYPE_TX)
  1037. svg_lbox(Y,
  1038. sample->start_time,
  1039. sample->end_time,
  1040. h,
  1041. sample->err ? "error" : "net",
  1042. sample->fd,
  1043. sample->err,
  1044. sample->merges);
  1045. }
  1046. suf = "";
  1047. bytes = c->total_bytes;
  1048. if (bytes > 1024) {
  1049. bytes = bytes / 1024;
  1050. suf = "K";
  1051. }
  1052. if (bytes > 1024) {
  1053. bytes = bytes / 1024;
  1054. suf = "M";
  1055. }
  1056. if (bytes > 1024) {
  1057. bytes = bytes / 1024;
  1058. suf = "G";
  1059. }
  1060. sprintf(comm, "%s:%i (%3.1f %sbytes)", c->comm ?: "", p->pid, bytes, suf);
  1061. svg_text(Y, c->start_time, comm);
  1062. c->Y = Y;
  1063. Y++;
  1064. c = c->next;
  1065. }
  1066. p = p->next;
  1067. }
  1068. }
  1069. static void draw_process_bars(struct timechart *tchart)
  1070. {
  1071. struct per_pid *p;
  1072. struct per_pidcomm *c;
  1073. struct cpu_sample *sample;
  1074. int Y = 0;
  1075. Y = 2 * tchart->numcpus + 2;
  1076. p = tchart->all_data;
  1077. while (p) {
  1078. c = p->all;
  1079. while (c) {
  1080. if (!c->display) {
  1081. c->Y = 0;
  1082. c = c->next;
  1083. continue;
  1084. }
  1085. svg_box(Y, c->start_time, c->end_time, "process");
  1086. sample = c->samples;
  1087. while (sample) {
  1088. if (sample->type == TYPE_RUNNING)
  1089. svg_running(Y, sample->cpu,
  1090. sample->start_time,
  1091. sample->end_time,
  1092. sample->backtrace);
  1093. if (sample->type == TYPE_BLOCKED)
  1094. svg_blocked(Y, sample->cpu,
  1095. sample->start_time,
  1096. sample->end_time,
  1097. sample->backtrace);
  1098. if (sample->type == TYPE_WAITING)
  1099. svg_waiting(Y, sample->cpu,
  1100. sample->start_time,
  1101. sample->end_time,
  1102. sample->backtrace);
  1103. sample = sample->next;
  1104. }
  1105. if (c->comm) {
  1106. char comm[256];
  1107. if (c->total_time > 5000000000) /* 5 seconds */
  1108. sprintf(comm, "%s:%i (%2.2fs)", c->comm, p->pid, c->total_time / (double)NSEC_PER_SEC);
  1109. else
  1110. sprintf(comm, "%s:%i (%3.1fms)", c->comm, p->pid, c->total_time / (double)NSEC_PER_MSEC);
  1111. svg_text(Y, c->start_time, comm);
  1112. }
  1113. c->Y = Y;
  1114. Y++;
  1115. c = c->next;
  1116. }
  1117. p = p->next;
  1118. }
  1119. }
  1120. static void add_process_filter(const char *string)
  1121. {
  1122. int pid = strtoull(string, NULL, 10);
  1123. struct process_filter *filt = malloc(sizeof(*filt));
  1124. if (!filt)
  1125. return;
  1126. filt->name = strdup(string);
  1127. filt->pid = pid;
  1128. filt->next = process_filter;
  1129. process_filter = filt;
  1130. }
  1131. static int passes_filter(struct per_pid *p, struct per_pidcomm *c)
  1132. {
  1133. struct process_filter *filt;
  1134. if (!process_filter)
  1135. return 1;
  1136. filt = process_filter;
  1137. while (filt) {
  1138. if (filt->pid && p->pid == filt->pid)
  1139. return 1;
  1140. if (strcmp(filt->name, c->comm) == 0)
  1141. return 1;
  1142. filt = filt->next;
  1143. }
  1144. return 0;
  1145. }
  1146. static int determine_display_tasks_filtered(struct timechart *tchart)
  1147. {
  1148. struct per_pid *p;
  1149. struct per_pidcomm *c;
  1150. int count = 0;
  1151. p = tchart->all_data;
  1152. while (p) {
  1153. p->display = 0;
  1154. if (p->start_time == 1)
  1155. p->start_time = tchart->first_time;
  1156. /* no exit marker, task kept running to the end */
  1157. if (p->end_time == 0)
  1158. p->end_time = tchart->last_time;
  1159. c = p->all;
  1160. while (c) {
  1161. c->display = 0;
  1162. if (c->start_time == 1)
  1163. c->start_time = tchart->first_time;
  1164. if (passes_filter(p, c)) {
  1165. c->display = 1;
  1166. p->display = 1;
  1167. count++;
  1168. }
  1169. if (c->end_time == 0)
  1170. c->end_time = tchart->last_time;
  1171. c = c->next;
  1172. }
  1173. p = p->next;
  1174. }
  1175. return count;
  1176. }
  1177. static int determine_display_tasks(struct timechart *tchart, u64 threshold)
  1178. {
  1179. struct per_pid *p;
  1180. struct per_pidcomm *c;
  1181. int count = 0;
  1182. p = tchart->all_data;
  1183. while (p) {
  1184. p->display = 0;
  1185. if (p->start_time == 1)
  1186. p->start_time = tchart->first_time;
  1187. /* no exit marker, task kept running to the end */
  1188. if (p->end_time == 0)
  1189. p->end_time = tchart->last_time;
  1190. if (p->total_time >= threshold)
  1191. p->display = 1;
  1192. c = p->all;
  1193. while (c) {
  1194. c->display = 0;
  1195. if (c->start_time == 1)
  1196. c->start_time = tchart->first_time;
  1197. if (c->total_time >= threshold) {
  1198. c->display = 1;
  1199. count++;
  1200. }
  1201. if (c->end_time == 0)
  1202. c->end_time = tchart->last_time;
  1203. c = c->next;
  1204. }
  1205. p = p->next;
  1206. }
  1207. return count;
  1208. }
  1209. static int determine_display_io_tasks(struct timechart *timechart, u64 threshold)
  1210. {
  1211. struct per_pid *p;
  1212. struct per_pidcomm *c;
  1213. int count = 0;
  1214. p = timechart->all_data;
  1215. while (p) {
  1216. /* no exit marker, task kept running to the end */
  1217. if (p->end_time == 0)
  1218. p->end_time = timechart->last_time;
  1219. c = p->all;
  1220. while (c) {
  1221. c->display = 0;
  1222. if (c->total_bytes >= threshold) {
  1223. c->display = 1;
  1224. count++;
  1225. }
  1226. if (c->end_time == 0)
  1227. c->end_time = timechart->last_time;
  1228. c = c->next;
  1229. }
  1230. p = p->next;
  1231. }
  1232. return count;
  1233. }
  1234. #define BYTES_THRESH (1 * 1024 * 1024)
  1235. #define TIME_THRESH 10000000
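/*
 * Pick the tasks to draw: start from BYTES_THRESH (I/O charts) or
 * TIME_THRESH and keep dividing the threshold by 10 until at least
 * proc_num tasks qualify, unless an explicit process filter is set.
 */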
  1236. static void write_svg_file(struct timechart *tchart, const char *filename)
  1237. {
  1238. u64 i;
  1239. int count;
  1240. int thresh = tchart->io_events ? BYTES_THRESH : TIME_THRESH;
  1241. if (tchart->power_only)
  1242. tchart->proc_num = 0;
  1243. /* We'd like to show at least proc_num tasks;
  1244. * be less picky if fewer than that qualify */
  1245. do {
  1246. if (process_filter)
  1247. count = determine_display_tasks_filtered(tchart);
  1248. else if (tchart->io_events)
  1249. count = determine_display_io_tasks(tchart, thresh);
  1250. else
  1251. count = determine_display_tasks(tchart, thresh);
  1252. thresh /= 10;
  1253. } while (!process_filter && thresh && count < tchart->proc_num);
  1254. if (!tchart->proc_num)
  1255. count = 0;
  1256. if (tchart->io_events) {
  1257. open_svg(filename, 0, count, tchart->first_time, tchart->last_time);
  1258. svg_time_grid(0.5);
  1259. svg_io_legenda();
  1260. draw_io_bars(tchart);
  1261. } else {
  1262. open_svg(filename, tchart->numcpus, count, tchart->first_time, tchart->last_time);
  1263. svg_time_grid(0);
  1264. svg_legenda();
  1265. for (i = 0; i < tchart->numcpus; i++)
  1266. svg_cpu_box(i, tchart->max_freq, tchart->turbo_frequency);
  1267. draw_cpu_usage(tchart);
  1268. if (tchart->proc_num)
  1269. draw_process_bars(tchart);
  1270. if (!tchart->tasks_only)
  1271. draw_c_p_states(tchart);
  1272. if (tchart->proc_num)
  1273. draw_wakeups(tchart);
  1274. }
  1275. svg_close();
  1276. }
  1277. static int process_header(struct perf_file_section *section __maybe_unused,
  1278. struct perf_header *ph,
  1279. int feat,
  1280. int fd __maybe_unused,
  1281. void *data)
  1282. {
  1283. struct timechart *tchart = data;
  1284. switch (feat) {
  1285. case HEADER_NRCPUS:
  1286. tchart->numcpus = ph->env.nr_cpus_avail;
  1287. break;
  1288. case HEADER_CPU_TOPOLOGY:
  1289. if (!tchart->topology)
  1290. break;
  1291. if (svg_build_topology_map(ph->env.sibling_cores,
  1292. ph->env.nr_sibling_cores,
  1293. ph->env.sibling_threads,
  1294. ph->env.nr_sibling_threads))
  1295. fprintf(stderr, "problem building topology\n");
  1296. break;
  1297. default:
  1298. break;
  1299. }
  1300. return 0;
  1301. }
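/*
 * The table below maps tracepoint names to handlers: the current
 * power:cpu_idle/cpu_frequency events (plus the older power:power_* ones
 * when SUPPORT_OLD_POWER_EVENTS is set), the sched_wakeup/sched_switch
 * events, and the syscall tracepoints used for the I/O chart.
 */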
  1302. static int __cmd_timechart(struct timechart *tchart, const char *output_name)
  1303. {
  1304. const struct perf_evsel_str_handler power_tracepoints[] = {
  1305. { "power:cpu_idle", process_sample_cpu_idle },
  1306. { "power:cpu_frequency", process_sample_cpu_frequency },
  1307. { "sched:sched_wakeup", process_sample_sched_wakeup },
  1308. { "sched:sched_switch", process_sample_sched_switch },
  1309. #ifdef SUPPORT_OLD_POWER_EVENTS
  1310. { "power:power_start", process_sample_power_start },
  1311. { "power:power_end", process_sample_power_end },
  1312. { "power:power_frequency", process_sample_power_frequency },
  1313. #endif
  1314. { "syscalls:sys_enter_read", process_enter_read },
  1315. { "syscalls:sys_enter_pread64", process_enter_read },
  1316. { "syscalls:sys_enter_readv", process_enter_read },
  1317. { "syscalls:sys_enter_preadv", process_enter_read },
  1318. { "syscalls:sys_enter_write", process_enter_write },
  1319. { "syscalls:sys_enter_pwrite64", process_enter_write },
  1320. { "syscalls:sys_enter_writev", process_enter_write },
  1321. { "syscalls:sys_enter_pwritev", process_enter_write },
  1322. { "syscalls:sys_enter_sync", process_enter_sync },
  1323. { "syscalls:sys_enter_sync_file_range", process_enter_sync },
  1324. { "syscalls:sys_enter_fsync", process_enter_sync },
  1325. { "syscalls:sys_enter_msync", process_enter_sync },
  1326. { "syscalls:sys_enter_recvfrom", process_enter_rx },
  1327. { "syscalls:sys_enter_recvmmsg", process_enter_rx },
  1328. { "syscalls:sys_enter_recvmsg", process_enter_rx },
  1329. { "syscalls:sys_enter_sendto", process_enter_tx },
  1330. { "syscalls:sys_enter_sendmsg", process_enter_tx },
  1331. { "syscalls:sys_enter_sendmmsg", process_enter_tx },
  1332. { "syscalls:sys_enter_epoll_pwait", process_enter_poll },
  1333. { "syscalls:sys_enter_epoll_wait", process_enter_poll },
  1334. { "syscalls:sys_enter_poll", process_enter_poll },
  1335. { "syscalls:sys_enter_ppoll", process_enter_poll },
  1336. { "syscalls:sys_enter_pselect6", process_enter_poll },
  1337. { "syscalls:sys_enter_select", process_enter_poll },
  1338. { "syscalls:sys_exit_read", process_exit_read },
  1339. { "syscalls:sys_exit_pread64", process_exit_read },
  1340. { "syscalls:sys_exit_readv", process_exit_read },
  1341. { "syscalls:sys_exit_preadv", process_exit_read },
  1342. { "syscalls:sys_exit_write", process_exit_write },
  1343. { "syscalls:sys_exit_pwrite64", process_exit_write },
  1344. { "syscalls:sys_exit_writev", process_exit_write },
  1345. { "syscalls:sys_exit_pwritev", process_exit_write },
  1346. { "syscalls:sys_exit_sync", process_exit_sync },
  1347. { "syscalls:sys_exit_sync_file_range", process_exit_sync },
  1348. { "syscalls:sys_exit_fsync", process_exit_sync },
  1349. { "syscalls:sys_exit_msync", process_exit_sync },
  1350. { "syscalls:sys_exit_recvfrom", process_exit_rx },
  1351. { "syscalls:sys_exit_recvmmsg", process_exit_rx },
  1352. { "syscalls:sys_exit_recvmsg", process_exit_rx },
  1353. { "syscalls:sys_exit_sendto", process_exit_tx },
  1354. { "syscalls:sys_exit_sendmsg", process_exit_tx },
  1355. { "syscalls:sys_exit_sendmmsg", process_exit_tx },
  1356. { "syscalls:sys_exit_epoll_pwait", process_exit_poll },
  1357. { "syscalls:sys_exit_epoll_wait", process_exit_poll },
  1358. { "syscalls:sys_exit_poll", process_exit_poll },
  1359. { "syscalls:sys_exit_ppoll", process_exit_poll },
  1360. { "syscalls:sys_exit_pselect6", process_exit_poll },
  1361. { "syscalls:sys_exit_select", process_exit_poll },
  1362. };
  1363. struct perf_data_file file = {
  1364. .path = input_name,
  1365. .mode = PERF_DATA_MODE_READ,
  1366. .force = tchart->force,
  1367. };
  1368. struct perf_session *session = perf_session__new(&file, false,
  1369. &tchart->tool);
  1370. int ret = -EINVAL;
  1371. if (session == NULL)
  1372. return -1;
  1373. symbol__init(&session->header.env);
  1374. (void)perf_header__process_sections(&session->header,
  1375. perf_data_file__fd(session->file),
  1376. tchart,
  1377. process_header);
  1378. if (!perf_session__has_traces(session, "timechart record"))
  1379. goto out_delete;
  1380. if (perf_session__set_tracepoints_handlers(session,
  1381. power_tracepoints)) {
  1382. pr_err("Initializing session tracepoint handlers failed\n");
  1383. goto out_delete;
  1384. }
  1385. ret = perf_session__process_events(session);
  1386. if (ret)
  1387. goto out_delete;
  1388. end_sample_processing(tchart);
  1389. sort_pids(tchart);
  1390. write_svg_file(tchart, output_name);
  1391. pr_info("Written %2.1f seconds of trace to %s.\n",
  1392. (tchart->last_time - tchart->first_time) / (double)NSEC_PER_SEC, output_name);
  1393. out_delete:
  1394. perf_session__delete(session);
  1395. return ret;
  1396. }
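/*
 * Build the argv for "perf record" in I/O mode: the common arguments plus
 * one "-e <tracepoint> --filter common_pid != <our pid>" quadruple per
 * available syscall event, so that perf's own I/O is excluded from the
 * chart.
 */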
  1397. static int timechart__io_record(int argc, const char **argv)
  1398. {
  1399. unsigned int rec_argc, i;
  1400. const char **rec_argv;
  1401. const char **p;
  1402. char *filter = NULL;
  1403. const char * const common_args[] = {
  1404. "record", "-a", "-R", "-c", "1",
  1405. };
  1406. unsigned int common_args_nr = ARRAY_SIZE(common_args);
  1407. const char * const disk_events[] = {
  1408. "syscalls:sys_enter_read",
  1409. "syscalls:sys_enter_pread64",
  1410. "syscalls:sys_enter_readv",
  1411. "syscalls:sys_enter_preadv",
  1412. "syscalls:sys_enter_write",
  1413. "syscalls:sys_enter_pwrite64",
  1414. "syscalls:sys_enter_writev",
  1415. "syscalls:sys_enter_pwritev",
  1416. "syscalls:sys_enter_sync",
  1417. "syscalls:sys_enter_sync_file_range",
  1418. "syscalls:sys_enter_fsync",
  1419. "syscalls:sys_enter_msync",
  1420. "syscalls:sys_exit_read",
  1421. "syscalls:sys_exit_pread64",
  1422. "syscalls:sys_exit_readv",
  1423. "syscalls:sys_exit_preadv",
  1424. "syscalls:sys_exit_write",
  1425. "syscalls:sys_exit_pwrite64",
  1426. "syscalls:sys_exit_writev",
  1427. "syscalls:sys_exit_pwritev",
  1428. "syscalls:sys_exit_sync",
  1429. "syscalls:sys_exit_sync_file_range",
  1430. "syscalls:sys_exit_fsync",
  1431. "syscalls:sys_exit_msync",
  1432. };
  1433. unsigned int disk_events_nr = ARRAY_SIZE(disk_events);
  1434. const char * const net_events[] = {
  1435. "syscalls:sys_enter_recvfrom",
  1436. "syscalls:sys_enter_recvmmsg",
  1437. "syscalls:sys_enter_recvmsg",
  1438. "syscalls:sys_enter_sendto",
  1439. "syscalls:sys_enter_sendmsg",
  1440. "syscalls:sys_enter_sendmmsg",
  1441. "syscalls:sys_exit_recvfrom",
  1442. "syscalls:sys_exit_recvmmsg",
  1443. "syscalls:sys_exit_recvmsg",
  1444. "syscalls:sys_exit_sendto",
  1445. "syscalls:sys_exit_sendmsg",
  1446. "syscalls:sys_exit_sendmmsg",
  1447. };
  1448. unsigned int net_events_nr = ARRAY_SIZE(net_events);
  1449. const char * const poll_events[] = {
  1450. "syscalls:sys_enter_epoll_pwait",
  1451. "syscalls:sys_enter_epoll_wait",
  1452. "syscalls:sys_enter_poll",
  1453. "syscalls:sys_enter_ppoll",
  1454. "syscalls:sys_enter_pselect6",
  1455. "syscalls:sys_enter_select",
  1456. "syscalls:sys_exit_epoll_pwait",
  1457. "syscalls:sys_exit_epoll_wait",
  1458. "syscalls:sys_exit_poll",
  1459. "syscalls:sys_exit_ppoll",
  1460. "syscalls:sys_exit_pselect6",
  1461. "syscalls:sys_exit_select",
  1462. };
  1463. unsigned int poll_events_nr = ARRAY_SIZE(poll_events);
  1464. rec_argc = common_args_nr +
  1465. disk_events_nr * 4 +
  1466. net_events_nr * 4 +
  1467. poll_events_nr * 4 +
  1468. argc;
  1469. rec_argv = calloc(rec_argc + 1, sizeof(char *));
  1470. if (rec_argv == NULL)
  1471. return -ENOMEM;
  1472. if (asprintf(&filter, "common_pid != %d", getpid()) < 0)
  1473. return -ENOMEM;
  1474. p = rec_argv;
  1475. for (i = 0; i < common_args_nr; i++)
  1476. *p++ = strdup(common_args[i]);
  1477. for (i = 0; i < disk_events_nr; i++) {
  1478. if (!is_valid_tracepoint(disk_events[i])) {
  1479. rec_argc -= 4;
  1480. continue;
  1481. }
  1482. *p++ = "-e";
  1483. *p++ = strdup(disk_events[i]);
  1484. *p++ = "--filter";
  1485. *p++ = filter;
  1486. }
  1487. for (i = 0; i < net_events_nr; i++) {
  1488. if (!is_valid_tracepoint(net_events[i])) {
  1489. rec_argc -= 4;
  1490. continue;
  1491. }
  1492. *p++ = "-e";
  1493. *p++ = strdup(net_events[i]);
  1494. *p++ = "--filter";
  1495. *p++ = filter;
  1496. }
  1497. for (i = 0; i < poll_events_nr; i++) {
  1498. if (!is_valid_tracepoint(poll_events[i])) {
  1499. rec_argc -= 4;
  1500. continue;
  1501. }
  1502. *p++ = "-e";
  1503. *p++ = strdup(poll_events[i]);
  1504. *p++ = "--filter";
  1505. *p++ = filter;
  1506. }
  1507. for (i = 0; i < (unsigned int)argc; i++)
  1508. *p++ = argv[i];
  1509. return cmd_record(rec_argc, rec_argv, NULL);
  1510. }
  1511. static int timechart__record(struct timechart *tchart, int argc, const char **argv)
  1512. {
  1513. unsigned int rec_argc, i, j;
  1514. const char **rec_argv;
  1515. const char **p;
  1516. unsigned int record_elems;
  1517. const char * const common_args[] = {
  1518. "record", "-a", "-R", "-c", "1",
  1519. };
  1520. unsigned int common_args_nr = ARRAY_SIZE(common_args);
  1521. const char * const backtrace_args[] = {
  1522. "-g",
  1523. };
  1524. unsigned int backtrace_args_no = ARRAY_SIZE(backtrace_args);
  1525. const char * const power_args[] = {
  1526. "-e", "power:cpu_frequency",
  1527. "-e", "power:cpu_idle",
  1528. };
  1529. unsigned int power_args_nr = ARRAY_SIZE(power_args);
  1530. const char * const old_power_args[] = {
  1531. #ifdef SUPPORT_OLD_POWER_EVENTS
  1532. "-e", "power:power_start",
  1533. "-e", "power:power_end",
  1534. "-e", "power:power_frequency",
  1535. #endif
  1536. };
  1537. unsigned int old_power_args_nr = ARRAY_SIZE(old_power_args);
  1538. const char * const tasks_args[] = {
  1539. "-e", "sched:sched_wakeup",
  1540. "-e", "sched:sched_switch",
  1541. };
  1542. unsigned int tasks_args_nr = ARRAY_SIZE(tasks_args);
  1543. #ifdef SUPPORT_OLD_POWER_EVENTS
  1544. if (!is_valid_tracepoint("power:cpu_idle") &&
  1545. is_valid_tracepoint("power:power_start")) {
  1546. use_old_power_events = 1;
  1547. power_args_nr = 0;
  1548. } else {
  1549. old_power_args_nr = 0;
  1550. }
  1551. #endif
  1552. if (tchart->power_only)
  1553. tasks_args_nr = 0;
  1554. if (tchart->tasks_only) {
  1555. power_args_nr = 0;
  1556. old_power_args_nr = 0;
  1557. }
  1558. if (!tchart->with_backtrace)
  1559. backtrace_args_no = 0;
  1560. record_elems = common_args_nr + tasks_args_nr +
  1561. power_args_nr + old_power_args_nr + backtrace_args_no;
  1562. rec_argc = record_elems + argc;
  1563. rec_argv = calloc(rec_argc + 1, sizeof(char *));
  1564. if (rec_argv == NULL)
  1565. return -ENOMEM;
  1566. p = rec_argv;
  1567. for (i = 0; i < common_args_nr; i++)
  1568. *p++ = strdup(common_args[i]);
  1569. for (i = 0; i < backtrace_args_no; i++)
  1570. *p++ = strdup(backtrace_args[i]);
  1571. for (i = 0; i < tasks_args_nr; i++)
  1572. *p++ = strdup(tasks_args[i]);
  1573. for (i = 0; i < power_args_nr; i++)
  1574. *p++ = strdup(power_args[i]);
  1575. for (i = 0; i < old_power_args_nr; i++)
  1576. *p++ = strdup(old_power_args[i]);
  1577. for (j = 0; j < (unsigned int)argc; j++)
  1578. *p++ = argv[j];
  1579. return cmd_record(rec_argc, rec_argv, NULL);
  1580. }
  1581. static int
  1582. parse_process(const struct option *opt __maybe_unused, const char *arg,
  1583. int __maybe_unused unset)
  1584. {
  1585. if (arg)
  1586. add_process_filter(arg);
  1587. return 0;
  1588. }
  1589. static int
  1590. parse_highlight(const struct option *opt __maybe_unused, const char *arg,
  1591. int __maybe_unused unset)
  1592. {
  1593. unsigned long duration = strtoul(arg, NULL, 0);
  1594. if (svg_highlight || svg_highlight_name)
  1595. return -1;
  1596. if (duration)
  1597. svg_highlight = duration;
  1598. else
  1599. svg_highlight_name = strdup(arg);
  1600. return 0;
  1601. }
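/*
 * Parse a time argument with an optional "ms", "us" or "ns" suffix
 * (default ns) into nanoseconds, e.g. "--io-min-time 2ms".
 */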
  1602. static int
  1603. parse_time(const struct option *opt, const char *arg, int __maybe_unused unset)
  1604. {
  1605. char unit = 'n';
  1606. u64 *value = opt->value;
  1607. if (sscanf(arg, "%" PRIu64 "%cs", value, &unit) > 0) {
  1608. switch (unit) {
  1609. case 'm':
  1610. *value *= NSEC_PER_MSEC;
  1611. break;
  1612. case 'u':
  1613. *value *= NSEC_PER_USEC;
  1614. break;
  1615. case 'n':
  1616. break;
  1617. default:
  1618. return -1;
  1619. }
  1620. }
  1621. return 0;
  1622. }
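/*
 * Entry point. A typical session (a sketch, not spelled out in this file):
 *
 *   perf timechart record -- <workload>   # writes perf.data
 *   perf timechart -o output.svg          # renders the chart
 *
 * where -o defaults to "output.svg".
 */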
  1623. int cmd_timechart(int argc, const char **argv,
  1624. const char *prefix __maybe_unused)
  1625. {
  1626. struct timechart tchart = {
  1627. .tool = {
  1628. .comm = process_comm_event,
  1629. .fork = process_fork_event,
  1630. .exit = process_exit_event,
  1631. .sample = process_sample_event,
  1632. .ordered_events = true,
  1633. },
  1634. .proc_num = 15,
  1635. .min_time = NSEC_PER_MSEC,
  1636. .merge_dist = 1000,
  1637. };
  1638. const char *output_name = "output.svg";
  1639. const struct option timechart_options[] = {
  1640. OPT_STRING('i', "input", &input_name, "file", "input file name"),
  1641. OPT_STRING('o', "output", &output_name, "file", "output file name"),
  1642. OPT_INTEGER('w', "width", &svg_page_width, "page width"),
  1643. OPT_CALLBACK(0, "highlight", NULL, "duration or task name",
  1644. "highlight tasks. Pass duration in ns or process name.",
  1645. parse_highlight),
  1646. OPT_BOOLEAN('P', "power-only", &tchart.power_only, "output power data only"),
  1647. OPT_BOOLEAN('T', "tasks-only", &tchart.tasks_only,
  1648. "output processes data only"),
  1649. OPT_CALLBACK('p', "process", NULL, "process",
  1650. "process selector. Pass a pid or process name.",
  1651. parse_process),
  1652. OPT_CALLBACK(0, "symfs", NULL, "directory",
  1653. "Look for files with symbols relative to this directory",
  1654. symbol__config_symfs),
  1655. OPT_INTEGER('n', "proc-num", &tchart.proc_num,
  1656. "min. number of tasks to print"),
  1657. OPT_BOOLEAN('t', "topology", &tchart.topology,
  1658. "sort CPUs according to topology"),
  1659. OPT_BOOLEAN(0, "io-skip-eagain", &tchart.skip_eagain,
  1660. "skip EAGAIN errors"),
  1661. OPT_CALLBACK(0, "io-min-time", &tchart.min_time, "time",
  1662. "all IO faster than min-time will visually appear longer",
  1663. parse_time),
  1664. OPT_CALLBACK(0, "io-merge-dist", &tchart.merge_dist, "time",
  1665. "merge events that are merge-dist us apart",
  1666. parse_time),
  1667. OPT_BOOLEAN('f', "force", &tchart.force, "don't complain, do it"),
  1668. OPT_END()
  1669. };
  1670. const char * const timechart_subcommands[] = { "record", NULL };
  1671. const char *timechart_usage[] = {
  1672. "perf timechart [<options>] {record}",
  1673. NULL
  1674. };
  1675. const struct option timechart_record_options[] = {
  1676. OPT_BOOLEAN('P', "power-only", &tchart.power_only, "output power data only"),
  1677. OPT_BOOLEAN('T', "tasks-only", &tchart.tasks_only,
  1678. "output processes data only"),
  1679. OPT_BOOLEAN('I', "io-only", &tchart.io_only,
  1680. "record only IO data"),
  1681. OPT_BOOLEAN('g', "callchain", &tchart.with_backtrace, "record callchain"),
  1682. OPT_END()
  1683. };
  1684. const char * const timechart_record_usage[] = {
  1685. "perf timechart record [<options>]",
  1686. NULL
  1687. };
  1688. argc = parse_options_subcommand(argc, argv, timechart_options, timechart_subcommands,
  1689. timechart_usage, PARSE_OPT_STOP_AT_NON_OPTION);
  1690. if (tchart.power_only && tchart.tasks_only) {
  1691. pr_err("-P and -T options cannot be used at the same time.\n");
  1692. return -1;
  1693. }
  1694. if (argc && !strncmp(argv[0], "rec", 3)) {
  1695. argc = parse_options(argc, argv, timechart_record_options,
  1696. timechart_record_usage,
  1697. PARSE_OPT_STOP_AT_NON_OPTION);
  1698. if (tchart.power_only && tchart.tasks_only) {
  1699. pr_err("-P and -T options cannot be used at the same time.\n");
  1700. return -1;
  1701. }
  1702. if (tchart.io_only)
  1703. return timechart__io_record(argc, argv);
  1704. else
  1705. return timechart__record(&tchart, argc, argv);
  1706. } else if (argc)
  1707. usage_with_options(timechart_usage, timechart_options);
  1708. setup_pager();
  1709. return __cmd_timechart(&tchart, output_name);
  1710. }