ipa-profile.c

/* Basic IPA optimizations based on profile.
   Copyright (C) 2003-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* The ipa-profile pass implements the following analyses, propagating profile
   information inter-procedurally.

   - Count histogram construction.  This is a histogram analyzing how much
     time is spent executing statements with a given execution count read
     from profile feedback.  This histogram is complete only with LTO,
     otherwise it contains information only about the current unit.

     A similar histogram is also estimated by the coverage runtime.  That
     histogram is not dependent on LTO, but it suffers from various defects:
     first, the gcov runtime does not weight individual basic blocks by
     estimated execution time and, second, the merging of multiple runs
     assumes that the histogram distribution did not change.  Consequently
     the histogram constructed here may be more precise.

     The information is used to set hot/cold thresholds.

   - Next, speculative indirect call resolution is performed: the local
     profile pass assigns a profile-id to each function and provides us with
     a histogram specifying the most common target.  We look up the callgraph
     node corresponding to the target and produce a speculative call.
     This call may or may not survive through IPA optimization based on the
     decision of the inliner.

   - Finally we propagate the following flags: unlikely executed, executed
     once, executed at startup and executed at exit.  These flags are used to
     control code size/performance thresholds and code placement (by producing
     .text.unlikely/.text.hot/.text.startup/.text.exit subsections).  */
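/* Illustrative example of the hot-count threshold computation performed in
   ipa_profile () below (the numbers are made up, not taken from any real
   profile).  Assume three histogram entries, sorted by descending count:

       count=1000 time=10   (weighted time 10000)
       count=100  time=50   (weighted time  5000)
       count=1    time=400  (weighted time   400)

   The overall weighted time is 15400.  With --param hot-bb-count-ws-permille=900
   the cutoff is (15400 * 900 + 500) / 1000 = 13860.  Walking the entries from
   the most frequent one, the cumulated weighted time reaches the cutoff at the
   second entry, so the hot count threshold becomes 100: statements executed at
   least 100 times are considered hot.  */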
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
#include "double-int.h"
#include "input.h"
#include "alias.h"
#include "symtab.h"
#include "wide-int.h"
#include "inchash.h"
#include "tree.h"
#include "fold-const.h"
#include "predict.h"
#include "dominance.h"
#include "cfg.h"
#include "basic-block.h"
#include "hash-map.h"
#include "is-a.h"
#include "plugin-api.h"
#include "hard-reg-set.h"
#include "input.h"
#include "function.h"
#include "ipa-ref.h"
#include "cgraph.h"
#include "tree-pass.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-expr.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "flags.h"
#include "target.h"
#include "tree-iterator.h"
#include "ipa-utils.h"
#include "profile.h"
#include "params.h"
#include "value-prof.h"
#include "alloc-pool.h"
#include "tree-inline.h"
#include "lto-streamer.h"
#include "data-streamer.h"
#include "symbol-summary.h"
#include "ipa-prop.h"
#include "ipa-inline.h"
/* Entry in the histogram.  */

struct histogram_entry
{
  gcov_type count;
  int time;
  int size;
};

/* Histogram of profile values.
   The histogram is represented as an ordered vector of entries allocated via
   histogram_pool.  During construction a separate hashtable is kept to look
   up duplicate entries.  */

vec<histogram_entry *> histogram;
static alloc_pool histogram_pool;
/* Hashtable support for storing histogram entries hashed by their execution
   count.  */

struct histogram_hash : typed_noop_remove <histogram_entry>
{
  typedef histogram_entry value_type;
  typedef histogram_entry compare_type;
  static inline hashval_t hash (const value_type *);
  static inline int equal (const value_type *, const compare_type *);
};

inline hashval_t
histogram_hash::hash (const histogram_entry *val)
{
  return val->count;
}

inline int
histogram_hash::equal (const histogram_entry *val, const histogram_entry *val2)
{
  return val->count == val2->count;
}
/* Account TIME and SIZE executed COUNT times into HISTOGRAM.
   HASHTABLE is the side hashtable kept to avoid duplicate entries.  */

static void
account_time_size (hash_table<histogram_hash> *hashtable,
                   vec<histogram_entry *> &histogram,
                   gcov_type count, int time, int size)
{
  histogram_entry key = {count, 0, 0};
  histogram_entry **val = hashtable->find_slot (&key, INSERT);

  if (!*val)
    {
      *val = (histogram_entry *) pool_alloc (histogram_pool);
      **val = key;
      histogram.safe_push (*val);
    }
  (*val)->time += time;
  (*val)->size += size;
}
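/* qsort comparator for histogram entries; entries with larger execution
   counts sort first.  */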
int
cmp_counts (const void *v1, const void *v2)
{
  const histogram_entry *h1 = *(const histogram_entry * const *)v1;
  const histogram_entry *h2 = *(const histogram_entry * const *)v2;
  if (h1->count < h2->count)
    return 1;
  if (h1->count > h2->count)
    return -1;
  return 0;
}
/* Dump HISTOGRAM to FILE.  */

static void
dump_histogram (FILE *file, vec<histogram_entry *> histogram)
{
  unsigned int i;
  gcov_type overall_time = 0, cumulated_time = 0, cumulated_size = 0,
            overall_size = 0;

  fprintf (file, "Histogram:\n");
  for (i = 0; i < histogram.length (); i++)
    {
      overall_time += histogram[i]->count * histogram[i]->time;
      overall_size += histogram[i]->size;
    }
  if (!overall_time)
    overall_time = 1;
  if (!overall_size)
    overall_size = 1;
  for (i = 0; i < histogram.length (); i++)
    {
      cumulated_time += histogram[i]->count * histogram[i]->time;
      cumulated_size += histogram[i]->size;
      fprintf (file, " %"PRId64": time:%i (%2.2f) size:%i (%2.2f)\n",
               (int64_t) histogram[i]->count,
               histogram[i]->time,
               cumulated_time * 100.0 / overall_time,
               histogram[i]->size,
               cumulated_size * 100.0 / overall_size);
    }
}
/* Collect histogram from CFG profiles.  */

static void
ipa_profile_generate_summary (void)
{
  struct cgraph_node *node;
  gimple_stmt_iterator gsi;
  basic_block bb;

  hash_table<histogram_hash> hashtable (10);
  histogram_pool = create_alloc_pool ("IPA histogram",
                                      sizeof (struct histogram_entry), 10);

  FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (node)
    FOR_EACH_BB_FN (bb, DECL_STRUCT_FUNCTION (node->decl))
      {
        int time = 0;
        int size = 0;
        for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
          {
            gimple stmt = gsi_stmt (gsi);
            if (gimple_code (stmt) == GIMPLE_CALL
                && !gimple_call_fndecl (stmt))
              {
                histogram_value h;
                h = gimple_histogram_value_of_type
                      (DECL_STRUCT_FUNCTION (node->decl),
                       stmt, HIST_TYPE_INDIR_CALL);
                /* No need to do sanity check: gimple_ic_transform already
                   takes away bad histograms.  */
                if (h)
                  {
                    /* Counter 0 is the target, counter 1 is the number of
                       executions in which we called the target and counter 2
                       is the total number of executions.  */
                    if (h->hvalue.counters[2])
                      {
                        struct cgraph_edge * e = node->get_edge (stmt);
                        if (e && !e->indirect_unknown_callee)
                          continue;
                        e->indirect_info->common_target_id
                          = h->hvalue.counters [0];
                        e->indirect_info->common_target_probability
                          = GCOV_COMPUTE_SCALE (h->hvalue.counters [1],
                                                h->hvalue.counters [2]);
                        if (e->indirect_info->common_target_probability
                            > REG_BR_PROB_BASE)
                          {
                            if (dump_file)
                              fprintf (dump_file, "Probability capped to 1\n");
                            e->indirect_info->common_target_probability
                              = REG_BR_PROB_BASE;
                          }
                      }
                    gimple_remove_histogram_value
                      (DECL_STRUCT_FUNCTION (node->decl), stmt, h);
                  }
              }
            time += estimate_num_insns (stmt, &eni_time_weights);
            size += estimate_num_insns (stmt, &eni_size_weights);
          }
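        /* Account the basic block's estimated time and size, weighted by its
           execution count, into the global histogram.  */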
        account_time_size (&hashtable, histogram, bb->count, time, size);
      }
  histogram.qsort (cmp_counts);
}
/* Serialize the ipa info for lto.  */

static void
ipa_profile_write_summary (void)
{
  struct lto_simple_output_block *ob
    = lto_create_simple_output_block (LTO_section_ipa_profile);
  unsigned int i;

  streamer_write_uhwi_stream (ob->main_stream, histogram.length ());
  for (i = 0; i < histogram.length (); i++)
    {
      streamer_write_gcov_count_stream (ob->main_stream, histogram[i]->count);
      streamer_write_uhwi_stream (ob->main_stream, histogram[i]->time);
      streamer_write_uhwi_stream (ob->main_stream, histogram[i]->size);
    }
  lto_destroy_simple_output_block (ob);
}
/* Deserialize the ipa info for lto.  */

static void
ipa_profile_read_summary (void)
{
  struct lto_file_decl_data ** file_data_vec
    = lto_get_file_decl_data ();
  struct lto_file_decl_data * file_data;
  int j = 0;

  hash_table<histogram_hash> hashtable (10);
  histogram_pool = create_alloc_pool ("IPA histogram",
                                      sizeof (struct histogram_entry), 10);

  while ((file_data = file_data_vec[j++]))
    {
      const char *data;
      size_t len;
      struct lto_input_block *ib
        = lto_create_simple_input_block (file_data,
                                         LTO_section_ipa_profile,
                                         &data, &len);
      if (ib)
        {
          unsigned int num = streamer_read_uhwi (ib);
          unsigned int n;
          for (n = 0; n < num; n++)
            {
              gcov_type count = streamer_read_gcov_count (ib);
              int time = streamer_read_uhwi (ib);
              int size = streamer_read_uhwi (ib);
              account_time_size (&hashtable, histogram,
                                 count, time, size);
            }
          lto_destroy_simple_input_block (file_data,
                                          LTO_section_ipa_profile,
                                          ib, data, len);
        }
    }
  histogram.qsort (cmp_counts);
}
/* Data used by ipa_propagate_frequency.  */

struct ipa_propagate_frequency_data
{
  cgraph_node *function_symbol;
  bool maybe_unlikely_executed;
  bool maybe_executed_once;
  bool only_called_at_startup;
  bool only_called_at_exit;
};
/* Worker for ipa_propagate_frequency.  */

static bool
ipa_propagate_frequency_1 (struct cgraph_node *node, void *data)
{
  struct ipa_propagate_frequency_data *d;
  struct cgraph_edge *edge;

  d = (struct ipa_propagate_frequency_data *)data;
  for (edge = node->callers;
       edge && (d->maybe_unlikely_executed || d->maybe_executed_once
                || d->only_called_at_startup || d->only_called_at_exit);
       edge = edge->next_caller)
    {
      if (edge->caller != d->function_symbol)
        {
          d->only_called_at_startup &= edge->caller->only_called_at_startup;
          /* It makes sense to put main() together with the static
             constructors.  It will be executed for sure, but the rest of the
             functions called from main are definitely not at startup only.  */
          if (MAIN_NAME_P (DECL_NAME (edge->caller->decl)))
            d->only_called_at_startup = 0;
          d->only_called_at_exit &= edge->caller->only_called_at_exit;
        }

      /* When profile feedback is available, do not try to propagate too hard;
         counts are already a good guide on function frequencies and roundoff
         errors can make us push a function into the unlikely section even
         when it is executed by the train run.  Transfer the function only if
         all callers are unlikely executed.  */
      if (profile_info
          && opt_for_fn (d->function_symbol->decl, flag_branch_probabilities)
          /* Thunks are not profiled.  This is more or less an implementation
             bug.  */
          && !d->function_symbol->thunk.thunk_p
          && (edge->caller->frequency != NODE_FREQUENCY_UNLIKELY_EXECUTED
              || (edge->caller->global.inlined_to
                  && edge->caller->global.inlined_to->frequency
                     != NODE_FREQUENCY_UNLIKELY_EXECUTED)))
        d->maybe_unlikely_executed = false;
      if (!edge->frequency)
        continue;
      switch (edge->caller->frequency)
        {
        case NODE_FREQUENCY_UNLIKELY_EXECUTED:
          break;
        case NODE_FREQUENCY_EXECUTED_ONCE:
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, " Called by %s that is executed once\n",
                     edge->caller->name ());
          d->maybe_unlikely_executed = false;
          if (inline_edge_summary (edge)->loop_depth)
            {
              d->maybe_executed_once = false;
              if (dump_file && (dump_flags & TDF_DETAILS))
                fprintf (dump_file, " Called in loop\n");
            }
          break;
        case NODE_FREQUENCY_HOT:
        case NODE_FREQUENCY_NORMAL:
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, " Called by %s that is normal or hot\n",
                     edge->caller->name ());
          d->maybe_unlikely_executed = false;
          d->maybe_executed_once = false;
          break;
        }
    }
  return edge != NULL;
}
/* Return true if NODE contains hot calls.  */

bool
contains_hot_call_p (struct cgraph_node *node)
{
  struct cgraph_edge *e;
  for (e = node->callees; e; e = e->next_callee)
    if (e->maybe_hot_p ())
      return true;
    else if (!e->inline_failed
             && contains_hot_call_p (e->callee))
      return true;
  for (e = node->indirect_calls; e; e = e->next_callee)
    if (e->maybe_hot_p ())
      return true;
  return false;
}
/* See if the frequency of NODE can be updated based on frequencies of its
   callers.  */

bool
ipa_propagate_frequency (struct cgraph_node *node)
{
  struct ipa_propagate_frequency_data d = {node, true, true, true, true};
  bool changed = false;

  /* We cannot propagate anything useful about externally visible functions
     nor about virtuals.  */
  if (!node->local.local
      || node->alias
      || (opt_for_fn (node->decl, flag_devirtualize)
          && DECL_VIRTUAL_P (node->decl)))
    return false;
  gcc_assert (node->analyzed);
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Processing frequency %s\n", node->name ());

  node->call_for_symbol_and_aliases (ipa_propagate_frequency_1, &d,
                                     true);

  if ((d.only_called_at_startup && !d.only_called_at_exit)
      && !node->only_called_at_startup)
    {
      node->only_called_at_startup = true;
      if (dump_file)
        fprintf (dump_file, "Node %s promoted to only called at startup.\n",
                 node->name ());
      changed = true;
    }
  if ((d.only_called_at_exit && !d.only_called_at_startup)
      && !node->only_called_at_exit)
    {
      node->only_called_at_exit = true;
      if (dump_file)
        fprintf (dump_file, "Node %s promoted to only called at exit.\n",
                 node->name ());
      changed = true;
    }

  /* With profile we can decide on hot/normal based on count.  */
  if (node->count)
    {
      bool hot = false;
      if (node->count >= get_hot_bb_threshold ())
        hot = true;
      if (!hot)
        hot |= contains_hot_call_p (node);
      if (hot)
        {
          if (node->frequency != NODE_FREQUENCY_HOT)
            {
              if (dump_file)
                fprintf (dump_file, "Node %s promoted to hot.\n",
                         node->name ());
              node->frequency = NODE_FREQUENCY_HOT;
              return true;
            }
          return false;
        }
      else if (node->frequency == NODE_FREQUENCY_HOT)
        {
          if (dump_file)
            fprintf (dump_file, "Node %s reduced to normal.\n",
                     node->name ());
          node->frequency = NODE_FREQUENCY_NORMAL;
          changed = true;
        }
    }
  /* These come either from profile or user hints; never update them.  */
  if (node->frequency == NODE_FREQUENCY_HOT
      || node->frequency == NODE_FREQUENCY_UNLIKELY_EXECUTED)
    return changed;
  if (d.maybe_unlikely_executed)
    {
      node->frequency = NODE_FREQUENCY_UNLIKELY_EXECUTED;
      if (dump_file)
        fprintf (dump_file, "Node %s promoted to unlikely executed.\n",
                 node->name ());
      changed = true;
    }
  else if (d.maybe_executed_once
           && node->frequency != NODE_FREQUENCY_EXECUTED_ONCE)
    {
      node->frequency = NODE_FREQUENCY_EXECUTED_ONCE;
      if (dump_file)
        fprintf (dump_file, "Node %s promoted to executed once.\n",
                 node->name ());
      changed = true;
    }
  return changed;
}
/* Simple ipa profile pass propagating frequencies across the callgraph.  */

static unsigned int
ipa_profile (void)
{
  struct cgraph_node **order;
  struct cgraph_edge *e;
  int order_pos;
  bool something_changed = false;
  int i;
  gcov_type overall_time = 0, cutoff = 0, cumulated = 0, overall_size = 0;
  struct cgraph_node *n,*n2;
  int nindirect = 0, ncommon = 0, nunknown = 0, nuseless = 0, nconverted = 0;
  bool node_map_initialized = false;

  if (dump_file)
    dump_histogram (dump_file, histogram);
  for (i = 0; i < (int)histogram.length (); i++)
    {
      overall_time += histogram[i]->count * histogram[i]->time;
      overall_size += histogram[i]->size;
    }
  if (overall_time)
    {
      gcov_type threshold;

      gcc_assert (overall_size);
      if (dump_file)
        {
          gcov_type min, cumulated_time = 0, cumulated_size = 0;

          fprintf (dump_file, "Overall time: %"PRId64"\n",
                   (int64_t)overall_time);
          min = get_hot_bb_threshold ();
          for (i = 0;
               i < (int)histogram.length () && histogram[i]->count >= min;
               i++)
            {
              cumulated_time += histogram[i]->count * histogram[i]->time;
              cumulated_size += histogram[i]->size;
            }
          fprintf (dump_file, "GCOV min count: %"PRId64
                   " Time:%3.2f%% Size:%3.2f%%\n",
                   (int64_t)min,
                   cumulated_time * 100.0 / overall_time,
                   cumulated_size * 100.0 / overall_size);
        }
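      /* Pick the count threshold so that the most frequently executed
         statements cover HOT_BB_COUNT_WS_PERMILLE per mille of the overall
         count-weighted time.  */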
      cutoff = (overall_time * PARAM_VALUE (HOT_BB_COUNT_WS_PERMILLE) + 500)
               / 1000;
      threshold = 0;
      for (i = 0; cumulated < cutoff; i++)
        {
          cumulated += histogram[i]->count * histogram[i]->time;
          threshold = histogram[i]->count;
        }
      if (!threshold)
        threshold = 1;
      if (dump_file)
        {
          gcov_type cumulated_time = 0, cumulated_size = 0;

          for (i = 0;
               i < (int)histogram.length () && histogram[i]->count >= threshold;
               i++)
            {
              cumulated_time += histogram[i]->count * histogram[i]->time;
              cumulated_size += histogram[i]->size;
            }
          fprintf (dump_file, "Determined min count: %"PRId64
                   " Time:%3.2f%% Size:%3.2f%%\n",
                   (int64_t)threshold,
                   cumulated_time * 100.0 / overall_time,
                   cumulated_size * 100.0 / overall_size);
        }
      if (threshold > get_hot_bb_threshold ()
          || in_lto_p)
        {
          if (dump_file)
            fprintf (dump_file, "Threshold updated.\n");
          set_hot_bb_threshold (threshold);
        }
    }
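  /* The histogram and its allocation pool are only needed for the threshold
     computation above; release them before the remaining work.  */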
  histogram.release ();
  free_alloc_pool (histogram_pool);
  /* Produce speculative calls: we saved the common target from profiling into
     e->common_target_id.  Now, at link time, we can look up the corresponding
     function node and produce a speculative call.  */
  FOR_EACH_DEFINED_FUNCTION (n)
    {
      bool update = false;

      if (!opt_for_fn (n->decl, flag_ipa_profile))
        continue;

      for (e = n->indirect_calls; e; e = e->next_callee)
        {
          if (n->count)
            nindirect++;
          if (e->indirect_info->common_target_id)
            {
              if (!node_map_initialized)
                init_node_map (false);
              node_map_initialized = true;
              ncommon++;
              n2 = find_func_by_profile_id (e->indirect_info->common_target_id);
              if (n2)
                {
                  if (dump_file)
                    {
                      fprintf (dump_file, "Indirect call -> direct call from"
                               " other module %s/%i => %s/%i, prob %3.2f\n",
                               xstrdup_for_dump (n->name ()), n->order,
                               xstrdup_for_dump (n2->name ()), n2->order,
                               e->indirect_info->common_target_probability
                               / (float)REG_BR_PROB_BASE);
                    }
                  if (e->indirect_info->common_target_probability
                      < REG_BR_PROB_BASE / 2)
                    {
                      nuseless++;
                      if (dump_file)
                        fprintf (dump_file,
                                 "Not speculating: probability is too low.\n");
                    }
                  else if (!e->maybe_hot_p ())
                    {
                      nuseless++;
                      if (dump_file)
                        fprintf (dump_file,
                                 "Not speculating: call is cold.\n");
                    }
                  else if (n2->get_availability () <= AVAIL_INTERPOSABLE
                           && n2->can_be_discarded_p ())
                    {
                      nuseless++;
                      if (dump_file)
                        fprintf (dump_file,
                                 "Not speculating: target is overwritable "
                                 "and can be discarded.\n");
                    }
                  else
                    {
                      /* Target may be overwritable, but profile says that
                         control flow goes to this particular implementation
                         of N2.  Speculate on the local alias to allow
                         inlining.  */
                      if (!n2->can_be_discarded_p ())
                        {
                          cgraph_node *alias;
                          alias = dyn_cast<cgraph_node *>
                                    (n2->noninterposable_alias ());
                          if (alias)
                            n2 = alias;
                        }
                      nconverted++;
                      e->make_speculative
                        (n2,
                         apply_scale (e->count,
                                      e->indirect_info->common_target_probability),
                         apply_scale (e->frequency,
                                      e->indirect_info->common_target_probability));
                      update = true;
                    }
                }
              else
                {
                  if (dump_file)
                    fprintf (dump_file,
                             "Function with profile-id %i not found.\n",
                             e->indirect_info->common_target_id);
                  nunknown++;
                }
            }
        }
      if (update)
        inline_update_overall_summary (n);
    }
  if (node_map_initialized)
    del_node_map ();
  if (dump_file && nindirect)
    fprintf (dump_file,
             "%i indirect calls trained.\n"
             "%i (%3.2f%%) have common target.\n"
             "%i (%3.2f%%) targets were not found.\n"
             "%i (%3.2f%%) speculations seem useless.\n"
             "%i (%3.2f%%) speculations produced.\n",
             nindirect,
             ncommon, ncommon * 100.0 / nindirect,
             nunknown, nunknown * 100.0 / nindirect,
             nuseless, nuseless * 100.0 / nindirect,
             nconverted, nconverted * 100.0 / nindirect);
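  /* Propagate the frequency flags over the callgraph in reverse postorder.
     Whenever a node's classification changes, its local callees are marked
     (via their AUX pointer) for reconsideration, and the process repeats
     until a fixed point is reached.  */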
  order = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);
  order_pos = ipa_reverse_postorder (order);
  for (i = order_pos - 1; i >= 0; i--)
    {
      if (order[i]->local.local
          && opt_for_fn (order[i]->decl, flag_ipa_profile)
          && ipa_propagate_frequency (order[i]))
        {
          for (e = order[i]->callees; e; e = e->next_callee)
            if (e->callee->local.local && !e->callee->aux)
              {
                something_changed = true;
                e->callee->aux = (void *)1;
              }
        }
      order[i]->aux = NULL;
    }

  while (something_changed)
    {
      something_changed = false;
      for (i = order_pos - 1; i >= 0; i--)
        {
          if (order[i]->aux
              && opt_for_fn (order[i]->decl, flag_ipa_profile)
              && ipa_propagate_frequency (order[i]))
            {
              for (e = order[i]->callees; e; e = e->next_callee)
                if (e->callee->local.local && !e->callee->aux)
                  {
                    something_changed = true;
                    e->callee->aux = (void *)1;
                  }
            }
          order[i]->aux = NULL;
        }
    }
  free (order);
  return 0;
}
namespace {

const pass_data pass_data_ipa_profile =
{
  IPA_PASS, /* type */
  "profile_estimate", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_IPA_PROFILE, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_ipa_profile : public ipa_opt_pass_d
{
public:
  pass_ipa_profile (gcc::context *ctxt)
    : ipa_opt_pass_d (pass_data_ipa_profile, ctxt,
                      ipa_profile_generate_summary, /* generate_summary */
                      ipa_profile_write_summary, /* write_summary */
                      ipa_profile_read_summary, /* read_summary */
                      NULL, /* write_optimization_summary */
                      NULL, /* read_optimization_summary */
                      NULL, /* stmt_fixup */
                      0, /* function_transform_todo_flags_start */
                      NULL, /* function_transform */
                      NULL) /* variable_transform */
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_ipa_profile || in_lto_p; }
  virtual unsigned int execute (function *) { return ipa_profile (); }

}; // class pass_ipa_profile

} // anon namespace

ipa_opt_pass_d *
make_pass_ipa_profile (gcc::context *ctxt)
{
  return new pass_ipa_profile (ctxt);
}