hist.c

#include <math.h>
#include <linux/compiler.h>

#include "../util/hist.h"
#include "../util/util.h"
#include "../util/sort.h"
#include "../util/evsel.h"
#include "../util/evlist.h"

/* hist period print (hpp) functions */

#define hpp__call_print_fn(hpp, fn, fmt, ...)			\
({								\
	int __ret = fn(hpp, fmt, ##__VA_ARGS__);		\
	advance_hpp(hpp, __ret);				\
	__ret;							\
})
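/*
 * Format one field of a hist entry into hpp->buf.  For an event group the
 * leader's value is printed first and the other members' values follow on
 * the same line; members without a sample are zero-filled so the columns
 * of every row stay aligned.
 */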
static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
		      hpp_field_fn get_field, const char *fmt, int len,
		      hpp_snprint_fn print_fn, bool fmt_percent)
{
	int ret;
	struct hists *hists = he->hists;
	struct perf_evsel *evsel = hists_to_evsel(hists);
	char *buf = hpp->buf;
	size_t size = hpp->size;

	if (fmt_percent) {
		double percent = 0.0;
		u64 total = hists__total_period(hists);

		if (total)
			percent = 100.0 * get_field(he) / total;

		ret = hpp__call_print_fn(hpp, print_fn, fmt, len, percent);
	} else
		ret = hpp__call_print_fn(hpp, print_fn, fmt, len, get_field(he));

	if (perf_evsel__is_group_event(evsel)) {
		int prev_idx, idx_delta;
		struct hist_entry *pair;
		int nr_members = evsel->nr_members;

		prev_idx = perf_evsel__group_idx(evsel);

		list_for_each_entry(pair, &he->pairs.head, pairs.node) {
			u64 period = get_field(pair);
			u64 total = hists__total_period(pair->hists);

			if (!total)
				continue;

			evsel = hists_to_evsel(pair->hists);
			idx_delta = perf_evsel__group_idx(evsel) - prev_idx - 1;

			while (idx_delta--) {
				/*
				 * zero-fill group members in the middle which
				 * have no sample
				 */
				if (fmt_percent) {
					ret += hpp__call_print_fn(hpp, print_fn,
								  fmt, len, 0.0);
				} else {
					ret += hpp__call_print_fn(hpp, print_fn,
								  fmt, len, 0ULL);
				}
			}

			if (fmt_percent) {
				ret += hpp__call_print_fn(hpp, print_fn, fmt, len,
							  100.0 * period / total);
			} else {
				ret += hpp__call_print_fn(hpp, print_fn, fmt,
							  len, period);
			}

			prev_idx = perf_evsel__group_idx(evsel);
		}

		idx_delta = nr_members - prev_idx - 1;

		while (idx_delta--) {
			/*
			 * zero-fill group members at last which have no sample
			 */
			if (fmt_percent) {
				ret += hpp__call_print_fn(hpp, print_fn,
							  fmt, len, 0.0);
			} else {
				ret += hpp__call_print_fn(hpp, print_fn,
							  fmt, len, 0ULL);
			}
		}
	}

	/*
	 * Restore original buf and size as it's where caller expects
	 * the result will be saved.
	 */
	hpp->buf = buf;
	hpp->size = size;
	return ret;
}
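/*
 * Pick the field width (a user override wins over the compile-time
 * default), reserving room for the leading space and, for percentages,
 * the trailing '%' sign.  When a field separator is in use the width is
 * forced to 1 so no alignment padding is emitted.
 */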
int hpp__fmt(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
	     struct hist_entry *he, hpp_field_fn get_field,
	     const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent)
{
	int len = fmt->user_len ?: fmt->len;

	if (symbol_conf.field_sep) {
		return __hpp__fmt(hpp, he, get_field, fmtstr, 1,
				  print_fn, fmt_percent);
	}

	if (fmt_percent)
		len -= 2; /* 2 for a space and a % sign */
	else
		len -= 1;

	return __hpp__fmt(hpp, he, get_field, fmtstr, len, print_fn, fmt_percent);
}
int hpp__fmt_acc(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
		 struct hist_entry *he, hpp_field_fn get_field,
		 const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent)
{
	if (!symbol_conf.cumulate_callchain) {
		int len = fmt->user_len ?: fmt->len;

		return snprintf(hpp->buf, hpp->size, " %*s", len - 1, "N/A");
	}

	return hpp__fmt(fmt, hpp, he, get_field, fmtstr, print_fn, fmt_percent);
}

static int field_cmp(u64 field_a, u64 field_b)
{
	if (field_a > field_b)
		return 1;
	if (field_a < field_b)
		return -1;
	return 0;
}
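/*
 * Group-aware comparison: the group leaders are compared first; only on
 * a tie are the remaining members compared in group-index order.  Member
 * values are gathered into zero-initialized temporary arrays because
 * either entry may lack a pair for some of the members.
 */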
static int __hpp__sort(struct hist_entry *a, struct hist_entry *b,
		       hpp_field_fn get_field)
{
	s64 ret;
	int i, nr_members;
	struct perf_evsel *evsel;
	struct hist_entry *pair;
	u64 *fields_a, *fields_b;

	ret = field_cmp(get_field(a), get_field(b));
	if (ret || !symbol_conf.event_group)
		return ret;

	evsel = hists_to_evsel(a->hists);
	if (!perf_evsel__is_group_event(evsel))
		return ret;

	nr_members = evsel->nr_members;
	fields_a = calloc(nr_members, sizeof(*fields_a));
	fields_b = calloc(nr_members, sizeof(*fields_b));

	if (!fields_a || !fields_b)
		goto out;

	list_for_each_entry(pair, &a->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		fields_a[perf_evsel__group_idx(evsel)] = get_field(pair);
	}

	list_for_each_entry(pair, &b->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		fields_b[perf_evsel__group_idx(evsel)] = get_field(pair);
	}

	for (i = 1; i < nr_members; i++) {
		ret = field_cmp(fields_a[i], fields_b[i]);
		if (ret)
			break;
	}

out:
	free(fields_a);
	free(fields_b);
	return ret;
}
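/*
 * Sort on the accumulated (children) period.  On a tie between entries
 * of the same thread, the callchain depth breaks the tie so that a
 * caller is placed above its callees.
 */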
static int __hpp__sort_acc(struct hist_entry *a, struct hist_entry *b,
			   hpp_field_fn get_field)
{
	s64 ret = 0;

	if (symbol_conf.cumulate_callchain) {
		/*
		 * Put caller above callee when they have equal period.
		 */
		ret = field_cmp(get_field(a), get_field(b));
		if (ret)
			return ret;

		if (a->thread != b->thread || !symbol_conf.use_callchain)
			return 0;

		ret = b->callchain->max_depth - a->callchain->max_depth;
	}
	return ret;
}

static int hpp__width_fn(struct perf_hpp_fmt *fmt,
			 struct perf_hpp *hpp __maybe_unused,
			 struct hists *hists)
{
	int len = fmt->user_len ?: fmt->len;
	struct perf_evsel *evsel = hists_to_evsel(hists);

	if (symbol_conf.event_group)
		len = max(len, evsel->nr_members * fmt->len);

	if (len < (int)strlen(fmt->name))
		len = strlen(fmt->name);

	return len;
}

static int hpp__header_fn(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			  struct hists *hists, int line __maybe_unused,
			  int *span __maybe_unused)
{
	int len = hpp__width_fn(fmt, hpp, hists);

	return scnprintf(hpp->buf, hpp->size, "%*s", len, fmt->name);
}
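/*
 * Both print callbacks share one varargs convention: callers always push
 * the field width first, then the value.  vsnprintf() consumes the pair
 * via the "%*" conversion; the color variant has to pull them out by
 * hand so the percentage can go through the color-threshold helper.
 */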
int hpp_color_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
	va_list args;
	ssize_t ssize = hpp->size;
	double percent;
	int ret, len;

	va_start(args, fmt);
	len = va_arg(args, int);
	percent = va_arg(args, double);
	ret = percent_color_len_snprintf(hpp->buf, hpp->size, fmt, len, percent);
	va_end(args);

	return (ret >= ssize) ? (ssize - 1) : ret;
}

static int hpp_entry_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
	va_list args;
	ssize_t ssize = hpp->size;
	int ret;

	va_start(args, fmt);
	ret = vsnprintf(hpp->buf, hpp->size, fmt, args);
	va_end(args);

	return (ret >= ssize) ? (ssize - 1) : ret;
}
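/*
 * The column implementations below are stamped out by macros: for each
 * (_type, _field) pair a getter plus color/entry/sort callbacks are
 * generated, so adding a new percentage or raw column is a one-line
 * instantiation.
 */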
#define __HPP_COLOR_PERCENT_FN(_type, _field)					\
static u64 he_get_##_field(struct hist_entry *he)				\
{										\
	return he->stat._field;							\
}										\
										\
static int hpp__color_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%",		\
			hpp_color_scnprintf, true);				\
}

#define __HPP_ENTRY_PERCENT_FN(_type, _field)					\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%",		\
			hpp_entry_scnprintf, true);				\
}

#define __HPP_SORT_FN(_type, _field)						\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort(a, b, he_get_##_field);				\
}

#define __HPP_COLOR_ACC_PERCENT_FN(_type, _field)				\
static u64 he_get_acc_##_field(struct hist_entry *he)				\
{										\
	return he->stat_acc->_field;						\
}										\
										\
static int hpp__color_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%",	\
			    hpp_color_scnprintf, true);				\
}

#define __HPP_ENTRY_ACC_PERCENT_FN(_type, _field)				\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%",	\
			    hpp_entry_scnprintf, true);				\
}

#define __HPP_SORT_ACC_FN(_type, _field)					\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort_acc(a, b, he_get_acc_##_field);			\
}

#define __HPP_ENTRY_RAW_FN(_type, _field)					\
static u64 he_get_raw_##_field(struct hist_entry *he)				\
{										\
	return he->stat._field;							\
}										\
										\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_raw_##_field, " %*"PRIu64,	\
			hpp_entry_scnprintf, false);				\
}

#define __HPP_SORT_RAW_FN(_type, _field)					\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort(a, b, he_get_raw_##_field);				\
}

#define HPP_PERCENT_FNS(_type, _field)		\
__HPP_COLOR_PERCENT_FN(_type, _field)		\
__HPP_ENTRY_PERCENT_FN(_type, _field)		\
__HPP_SORT_FN(_type, _field)

#define HPP_PERCENT_ACC_FNS(_type, _field)	\
__HPP_COLOR_ACC_PERCENT_FN(_type, _field)	\
__HPP_ENTRY_ACC_PERCENT_FN(_type, _field)	\
__HPP_SORT_ACC_FN(_type, _field)

#define HPP_RAW_FNS(_type, _field)		\
__HPP_ENTRY_RAW_FN(_type, _field)		\
__HPP_SORT_RAW_FN(_type, _field)
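/*
 * For example, HPP_PERCENT_FNS(overhead, period) expands to
 * he_get_period(), hpp__color_overhead(), hpp__entry_overhead() and
 * hpp__sort_overhead() -- the callbacks wired up in perf_hpp__format
 * below.
 */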
HPP_PERCENT_FNS(overhead, period)
HPP_PERCENT_FNS(overhead_sys, period_sys)
HPP_PERCENT_FNS(overhead_us, period_us)
HPP_PERCENT_FNS(overhead_guest_sys, period_guest_sys)
HPP_PERCENT_FNS(overhead_guest_us, period_guest_us)
HPP_PERCENT_ACC_FNS(overhead_acc, period)

HPP_RAW_FNS(samples, nr_events)
HPP_RAW_FNS(period, period)

static int64_t hpp__nop_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
			    struct hist_entry *a __maybe_unused,
			    struct hist_entry *b __maybe_unused)
{
	return 0;
}

static bool perf_hpp__is_hpp_entry(struct perf_hpp_fmt *a)
{
	return a->header == hpp__header_fn;
}

static bool hpp__equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	if (!perf_hpp__is_hpp_entry(a) || !perf_hpp__is_hpp_entry(b))
		return false;

	return a->idx == b->idx;
}
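/*
 * Static initializers for the builtin formats.  Note the COLOR and
 * COLOR_ACC variants are currently identical; they are kept as separate
 * macros, presumably so the two column families can diverge
 * independently.
 */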
#define HPP__COLOR_PRINT_FNS(_name, _fn, _idx)	\
	{					\
		.name	= _name,		\
		.header	= hpp__header_fn,	\
		.width	= hpp__width_fn,	\
		.color	= hpp__color_ ## _fn,	\
		.entry	= hpp__entry_ ## _fn,	\
		.cmp	= hpp__nop_cmp,		\
		.collapse = hpp__nop_cmp,	\
		.sort	= hpp__sort_ ## _fn,	\
		.idx	= PERF_HPP__ ## _idx,	\
		.equal	= hpp__equal,		\
	}

#define HPP__COLOR_ACC_PRINT_FNS(_name, _fn, _idx)	\
	{						\
		.name	= _name,			\
		.header	= hpp__header_fn,		\
		.width	= hpp__width_fn,		\
		.color	= hpp__color_ ## _fn,		\
		.entry	= hpp__entry_ ## _fn,		\
		.cmp	= hpp__nop_cmp,			\
		.collapse = hpp__nop_cmp,		\
		.sort	= hpp__sort_ ## _fn,		\
		.idx	= PERF_HPP__ ## _idx,		\
		.equal	= hpp__equal,			\
	}

#define HPP__PRINT_FNS(_name, _fn, _idx)	\
	{					\
		.name	= _name,		\
		.header	= hpp__header_fn,	\
		.width	= hpp__width_fn,	\
		.entry	= hpp__entry_ ## _fn,	\
		.cmp	= hpp__nop_cmp,		\
		.collapse = hpp__nop_cmp,	\
		.sort	= hpp__sort_ ## _fn,	\
		.idx	= PERF_HPP__ ## _idx,	\
		.equal	= hpp__equal,		\
	}
struct perf_hpp_fmt perf_hpp__format[] = {
	HPP__COLOR_PRINT_FNS("Overhead", overhead, OVERHEAD),
	HPP__COLOR_PRINT_FNS("sys", overhead_sys, OVERHEAD_SYS),
	HPP__COLOR_PRINT_FNS("usr", overhead_us, OVERHEAD_US),
	HPP__COLOR_PRINT_FNS("guest sys", overhead_guest_sys, OVERHEAD_GUEST_SYS),
	HPP__COLOR_PRINT_FNS("guest usr", overhead_guest_us, OVERHEAD_GUEST_US),
	HPP__COLOR_ACC_PRINT_FNS("Children", overhead_acc, OVERHEAD_ACC),
	HPP__PRINT_FNS("Samples", samples, SAMPLES),
	HPP__PRINT_FNS("Period", period, PERIOD)
};

struct perf_hpp_list perf_hpp_list = {
	.fields	= LIST_HEAD_INIT(perf_hpp_list.fields),
	.sorts	= LIST_HEAD_INIT(perf_hpp_list.sorts),
	.nr_header_lines = 1,
};

#undef HPP__COLOR_PRINT_FNS
#undef HPP__COLOR_ACC_PRINT_FNS
#undef HPP__PRINT_FNS

#undef HPP_PERCENT_FNS
#undef HPP_PERCENT_ACC_FNS
#undef HPP_RAW_FNS

#undef __HPP_HEADER_FN
#undef __HPP_WIDTH_FN
#undef __HPP_COLOR_PERCENT_FN
#undef __HPP_ENTRY_PERCENT_FN
#undef __HPP_COLOR_ACC_PERCENT_FN
#undef __HPP_ENTRY_ACC_PERCENT_FN
#undef __HPP_ENTRY_RAW_FN
#undef __HPP_SORT_FN
#undef __HPP_SORT_ACC_FN
#undef __HPP_SORT_RAW_FN
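/*
 * Set up the default output columns unless the user gave an explicit
 * field order.  With cumulation enabled (report --children) the
 * accumulated overhead column goes first and the plain overhead column
 * is renamed "Self".
 */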
void perf_hpp__init(void)
{
	int i;

	for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
		struct perf_hpp_fmt *fmt = &perf_hpp__format[i];

		INIT_LIST_HEAD(&fmt->list);

		/* sort_list may be linked by setup_sorting() */
		if (fmt->sort_list.next == NULL)
			INIT_LIST_HEAD(&fmt->sort_list);
	}

	/*
	 * If user specified field order, no need to setup default fields.
	 */
	if (is_strict_order(field_order))
		return;

	if (symbol_conf.cumulate_callchain) {
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_ACC);
		perf_hpp__format[PERF_HPP__OVERHEAD].name = "Self";
	}

	hpp_dimension__add_output(PERF_HPP__OVERHEAD);

	if (symbol_conf.show_cpu_utilization) {
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_SYS);
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_US);

		if (perf_guest) {
			hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_SYS);
			hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_US);
		}
	}

	if (symbol_conf.show_nr_samples)
		hpp_dimension__add_output(PERF_HPP__SAMPLES);

	if (symbol_conf.show_total_period)
		hpp_dimension__add_output(PERF_HPP__PERIOD);
}
void perf_hpp_list__column_register(struct perf_hpp_list *list,
				    struct perf_hpp_fmt *format)
{
	list_add_tail(&format->list, &list->fields);
}

void perf_hpp_list__register_sort_field(struct perf_hpp_list *list,
					struct perf_hpp_fmt *format)
{
	list_add_tail(&format->sort_list, &list->sorts);
}

void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
				       struct perf_hpp_fmt *format)
{
	list_add(&format->sort_list, &list->sorts);
}

void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
{
	list_del(&format->list);
}
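/*
 * Undo the cumulation setup: drop the accumulated column from the output
 * list and give the remaining overhead column its plain "Overhead" name
 * back.
 */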
void perf_hpp__cancel_cumulate(void)
{
	struct perf_hpp_fmt *fmt, *acc, *ovh, *tmp;

	if (is_strict_order(field_order))
		return;

	ovh = &perf_hpp__format[PERF_HPP__OVERHEAD];
	acc = &perf_hpp__format[PERF_HPP__OVERHEAD_ACC];

	perf_hpp_list__for_each_format_safe(&perf_hpp_list, fmt, tmp) {
		if (acc->equal(acc, fmt)) {
			perf_hpp__column_unregister(fmt);
			continue;
		}

		if (ovh->equal(ovh, fmt))
			fmt->name = "Overhead";
	}
}

static bool fmt_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	return a->equal && a->equal(a, b);
}
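/*
 * The next two helpers keep the output field list and the sort key list
 * in sync: any sort key that can be displayed is appended to the output
 * fields, and vice versa, skipping entries already present on the other
 * list.
 */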
void perf_hpp__setup_output_field(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt;

	/* append sort keys to output field */
	perf_hpp_list__for_each_sort_list(list, fmt) {
		struct perf_hpp_fmt *pos;

		/* skip sort-only fields ("sort_compute" in perf diff) */
		if (!fmt->entry && !fmt->color)
			continue;

		perf_hpp_list__for_each_format(list, pos) {
			if (fmt_equal(fmt, pos))
				goto next;
		}

		perf_hpp__column_register(fmt);
next:
		continue;
	}
}

void perf_hpp__append_sort_keys(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt;

	/* append output fields to sort keys */
	perf_hpp_list__for_each_format(list, fmt) {
		struct perf_hpp_fmt *pos;

		perf_hpp_list__for_each_sort_list(list, pos) {
			if (fmt_equal(fmt, pos))
				goto next;
		}

		perf_hpp__register_sort_field(fmt);
next:
		continue;
	}
}
static void fmt_free(struct perf_hpp_fmt *fmt)
{
	if (fmt->free)
		fmt->free(fmt);
}

void perf_hpp__reset_output_field(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt, *tmp;

	/* reset output fields */
	perf_hpp_list__for_each_format_safe(list, fmt, tmp) {
		list_del_init(&fmt->list);
		list_del_init(&fmt->sort_list);
		fmt_free(fmt);
	}

	/* reset sort keys */
	perf_hpp_list__for_each_sort_list_safe(list, fmt, tmp) {
		list_del_init(&fmt->list);
		list_del_init(&fmt->sort_list);
		fmt_free(fmt);
	}
}
/*
 * See hists__fprintf to match the column widths
 */
unsigned int hists__sort_list_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	int ret = 0;
	bool first = true;
	struct perf_hpp dummy_hpp;

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__should_skip(fmt, hists))
			continue;

		if (first)
			first = false;
		else
			ret += 2;

		ret += fmt->width(fmt, &dummy_hpp, hists);
	}

	if (verbose && hists__has(hists, sym)) /* Addr + origin */
		ret += 3 + BITS_PER_LONG / 4;

	return ret;
}

unsigned int hists__overhead_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	int ret = 0;
	bool first = true;
	struct perf_hpp dummy_hpp;

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__is_sort_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
			break;

		if (first)
			first = false;
		else
			ret += 2;

		ret += fmt->width(fmt, &dummy_hpp, hists);
	}

	return ret;
}
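/*
 * Restore a column to its default width.  Sort entries have their own
 * reset helper and dynamic entries are left untouched, so only the
 * builtin hpp columns are handled here.
 */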
void perf_hpp__reset_width(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	if (perf_hpp__is_sort_entry(fmt))
		return perf_hpp__reset_sort_width(fmt, hists);

	if (perf_hpp__is_dynamic_entry(fmt))
		return;

	BUG_ON(fmt->idx >= PERF_HPP__MAX_INDEX);

	switch (fmt->idx) {
	case PERF_HPP__OVERHEAD:
	case PERF_HPP__OVERHEAD_SYS:
	case PERF_HPP__OVERHEAD_US:
	case PERF_HPP__OVERHEAD_ACC:
		fmt->len = 8;
		break;

	case PERF_HPP__OVERHEAD_GUEST_SYS:
	case PERF_HPP__OVERHEAD_GUEST_US:
		fmt->len = 9;
		break;

	case PERF_HPP__SAMPLES:
	case PERF_HPP__PERIOD:
		fmt->len = 12;
		break;

	default:
		break;
	}
}
void hists__reset_column_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *node;

	hists__for_each_format(hists, fmt)
		perf_hpp__reset_width(fmt, hists);

	/* hierarchy entries have their own hpp list */
	list_for_each_entry(node, &hists->hpp_formats, list) {
		perf_hpp_list__for_each_format(&node->hpp, fmt)
			perf_hpp__reset_width(fmt, hists);
	}
}
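/*
 * Apply a comma-separated list of user widths (e.g. from report's
 * --column-widths option) to the output formats in order; parsing stops
 * at the first entry without a trailing comma.
 */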
void perf_hpp__set_user_width(const char *width_list_str)
{
	struct perf_hpp_fmt *fmt;
	const char *ptr = width_list_str;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		char *p;

		int len = strtol(ptr, &p, 10);
		fmt->user_len = len;

		if (*p == ',')
			ptr = p + 1;
		else
			break;
	}
}
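/*
 * Hierarchy mode keeps one perf_hpp_list_node per output level in each
 * hists.  The format is duplicated before being linked, since the same
 * fmt cannot sit on more than one list, and a level stays marked as
 * skipped only while every format on it should be skipped.
 */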
static int add_hierarchy_fmt(struct hists *hists, struct perf_hpp_fmt *fmt)
{
	struct perf_hpp_list_node *node = NULL;
	struct perf_hpp_fmt *fmt_copy;
	bool found = false;
	bool skip = perf_hpp__should_skip(fmt, hists);

	list_for_each_entry(node, &hists->hpp_formats, list) {
		if (node->level == fmt->level) {
			found = true;
			break;
		}
	}

	if (!found) {
		node = malloc(sizeof(*node));
		if (node == NULL)
			return -1;

		node->skip = skip;
		node->level = fmt->level;
		perf_hpp_list__init(&node->hpp);

		hists->nr_hpp_node++;
		list_add_tail(&node->list, &hists->hpp_formats);
	}

	fmt_copy = perf_hpp_fmt__dup(fmt);
	if (fmt_copy == NULL)
		return -1;

	if (!skip)
		node->skip = false;

	list_add_tail(&fmt_copy->list, &node->hpp.fields);
	list_add_tail(&fmt_copy->sort_list, &node->hpp.sorts);

	return 0;
}
int perf_hpp__setup_hists_formats(struct perf_hpp_list *list,
				  struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	struct perf_hpp_fmt *fmt;
	struct hists *hists;
	int ret;

	if (!symbol_conf.report_hierarchy)
		return 0;

	evlist__for_each_entry(evlist, evsel) {
		hists = evsel__hists(evsel);

		perf_hpp_list__for_each_sort_list(list, fmt) {
			if (perf_hpp__is_dynamic_entry(fmt) &&
			    !perf_hpp__defined_dynamic_entry(fmt, hists))
				continue;

			ret = add_hierarchy_fmt(hists, fmt);
			if (ret < 0)
				return ret;
		}
	}

	return 0;
}