ipa-utils.c

/* Utilities for ipa analysis.
   Copyright (C) 2005-2015 Free Software Foundation, Inc.
   Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
#include "double-int.h"
#include "input.h"
#include "alias.h"
#include "symtab.h"
#include "options.h"
#include "wide-int.h"
#include "inchash.h"
#include "tree.h"
#include "fold-const.h"
#include "predict.h"
#include "hard-reg-set.h"
#include "function.h"
#include "dominance.h"
#include "cfg.h"
#include "basic-block.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "tree-inline.h"
#include "dumpfile.h"
#include "langhooks.h"
#include "splay-tree.h"
#include "hash-map.h"
#include "plugin-api.h"
#include "ipa-ref.h"
#include "cgraph.h"
#include "ipa-utils.h"
#include "bitmap.h"
#include "ipa-reference.h"
#include "flags.h"
#include "diagnostic.h"
#include "lto-streamer.h"
#include "alloc-pool.h"
#include "symbol-summary.h"
#include "ipa-prop.h"
#include "ipa-inline.h"

/* Debugging function for postorder and inorder code.  NOTE is a string
   that is printed before the nodes are printed.  ORDER is an array of
   cgraph_nodes that has COUNT useful nodes in it.  */

void
ipa_print_order (FILE* out,
                 const char * note,
                 struct cgraph_node** order,
                 int count)
{
  int i;
  fprintf (out, "\n\n ordered call graph: %s\n", note);

  for (i = count - 1; i >= 0; i--)
    order[i]->dump (out);
  fprintf (out, "\n");
  fflush (out);
}

struct searchc_env
{
  struct cgraph_node **stack;    /* DFS stack of nodes being processed.  */
  int stack_size;                /* Number of nodes currently on STACK.  */
  struct cgraph_node **result;   /* Output array for the ordering.  */
  int order_pos;                 /* Next free slot in RESULT.  */
  splay_tree nodes_marked_new;   /* Not-yet-visited nodes, keyed by uid.  */
  bool reduce;                   /* If true, emit one entry per cycle.  */
  bool allow_overwritable;       /* Include AVAIL_INTERPOSABLE nodes.  */
  int count;                     /* Running DFS number.  */
};

/* This is an implementation of Tarjan's strongly connected region
   finder as reprinted in Aho, Hopcroft and Ullman's The Design and
   Analysis of Computer Algorithms (1974), pages 192-193.  This version
   has been customized for cgraph_nodes.  This function should only be
   called from itself or ipa_reduced_postorder.  ENV holds the traversal
   state and is passed explicitly because the function is recursive and
   C has no nested functions.  V is the node to start searching from.  */
static void
searchc (struct searchc_env* env, struct cgraph_node *v,
         bool (*ignore_edge) (struct cgraph_edge *))
{
  struct cgraph_edge *edge;
  struct ipa_dfs_info *v_info = (struct ipa_dfs_info *) v->aux;

  /* Mark the node as no longer new and give it its DFS numbers.  */
  v_info->new_node = false;
  splay_tree_remove (env->nodes_marked_new, v->uid);

  v_info->dfn_number = env->count;
  v_info->low_link = env->count;
  env->count++;
  env->stack[(env->stack_size)++] = v;
  v_info->on_stack = true;

  for (edge = v->callees; edge; edge = edge->next_callee)
    {
      struct ipa_dfs_info * w_info;
      enum availability avail;
      struct cgraph_node *w = edge->callee->ultimate_alias_target (&avail);

      if (!w || (ignore_edge && ignore_edge (edge)))
        continue;

      if (w->aux
          && (avail > AVAIL_INTERPOSABLE
              || (env->allow_overwritable && avail == AVAIL_INTERPOSABLE)))
        {
          w_info = (struct ipa_dfs_info *) w->aux;
          if (w_info->new_node)
            {
              searchc (env, w, ignore_edge);
              v_info->low_link =
                (v_info->low_link < w_info->low_link) ?
                v_info->low_link : w_info->low_link;
            }
          else if ((w_info->dfn_number < v_info->dfn_number)
                   && (w_info->on_stack))
            v_info->low_link =
              (w_info->dfn_number < v_info->low_link) ?
              w_info->dfn_number : v_info->low_link;
        }
    }
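
  /* V heads a strongly connected component exactly when no callee reached a
     node that was visited earlier and is still on the stack, i.e. when its
     low_link still equals its own DFS number.  Everything popped from the
     stack down to and including V then forms that component.  */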
  if (v_info->low_link == v_info->dfn_number)
    {
      struct cgraph_node *last = NULL;
      struct cgraph_node *x;
      struct ipa_dfs_info *x_info;
      do
        {
          x = env->stack[--(env->stack_size)];
          x_info = (struct ipa_dfs_info *) x->aux;
          x_info->on_stack = false;
          x_info->scc_no = v_info->dfn_number;

          if (env->reduce)
            {
              x_info->next_cycle = last;
              last = x;
            }
          else
            env->result[env->order_pos++] = x;
        }
      while (v != x);
      if (env->reduce)
        env->result[env->order_pos++] = v;
    }
}

/* Topsort the call graph by caller relation.  Put the result in ORDER.

   The REDUCE flag is true if you want the cycles reduced to single nodes.
   You can use ipa_get_nodes_in_cycle to obtain a vector containing all real
   call graph nodes in a reduced node.

   Set ALLOW_OVERWRITABLE if nodes with such availability should be included.
   IGNORE_EDGE, if non-NULL, is a hook that may make some edges insignificant
   for the topological sort.  */
int
ipa_reduced_postorder (struct cgraph_node **order,
                       bool reduce, bool allow_overwritable,
                       bool (*ignore_edge) (struct cgraph_edge *))
{
  struct cgraph_node *node;
  struct searchc_env env;
  splay_tree_node result;

  env.stack = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);
  env.stack_size = 0;
  env.result = order;
  env.order_pos = 0;
  env.nodes_marked_new = splay_tree_new (splay_tree_compare_ints, 0, 0);
  env.count = 1;
  env.reduce = reduce;
  env.allow_overwritable = allow_overwritable;

  FOR_EACH_DEFINED_FUNCTION (node)
    {
      enum availability avail = node->get_availability ();

      if (avail > AVAIL_INTERPOSABLE
          || (allow_overwritable
              && (avail == AVAIL_INTERPOSABLE)))
        {
          /* Reuse the info if it is already there.  */
          struct ipa_dfs_info *info = (struct ipa_dfs_info *) node->aux;
          if (!info)
            info = XCNEW (struct ipa_dfs_info);
          info->new_node = true;
          info->on_stack = false;
          info->next_cycle = NULL;
          node->aux = info;

          splay_tree_insert (env.nodes_marked_new,
                             (splay_tree_key)node->uid,
                             (splay_tree_value)node);
        }
      else
        node->aux = NULL;
    }
  result = splay_tree_min (env.nodes_marked_new);
  while (result)
    {
      node = (struct cgraph_node *)result->value;
      searchc (&env, node, ignore_edge);
      result = splay_tree_min (env.nodes_marked_new);
    }
  splay_tree_delete (env.nodes_marked_new);
  free (env.stack);

  return env.order_pos;
}
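
/* A typical consumer looks like the sketch below (not part of this file;
   process_node is a hypothetical per-node hook).  The caller allocates
   ORDER with symtab->cgraph_count entries, walks the result, and releases
   the aux info afterwards:

     struct cgraph_node **order
       = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);
     int order_pos = ipa_reduced_postorder (order, true, false, NULL);
     for (int i = 0; i < order_pos; i++)
       process_node (order[i]);  // callees come before callers
     ipa_free_postorder_info ();
     free (order);
*/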

/* Deallocate all ipa_dfs_info structures pointed to by the aux pointer of
   call graph nodes.  */

void
ipa_free_postorder_info (void)
{
  struct cgraph_node *node;
  FOR_EACH_DEFINED_FUNCTION (node)
    {
      /* Get rid of the aux information.  */
      if (node->aux)
        {
          free (node->aux);
          node->aux = NULL;
        }
    }
}

/* Get the set of nodes for the cycle in the reduced call graph starting
   from NODE.  */

vec<cgraph_node *>
ipa_get_nodes_in_cycle (struct cgraph_node *node)
{
  vec<cgraph_node *> v = vNULL;
  struct ipa_dfs_info *node_dfs_info;
  while (node)
    {
      v.safe_push (node);
      node_dfs_info = (struct ipa_dfs_info *) node->aux;
      node = node_dfs_info->next_cycle;
    }
  return v;
}
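
/* Sketch of walking a reduced node's cycle members (reduced_node stands for
   a node obtained from ipa_reduced_postorder with REDUCE set; the returned
   vector must be released by the caller):

     unsigned i;
     cgraph_node *n;
     vec<cgraph_node *> cycle = ipa_get_nodes_in_cycle (reduced_node);
     FOR_EACH_VEC_ELT (cycle, i, n)
       n->dump (symtab->dump_file);
     cycle.release ();
*/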

/* Return true iff the CS is an edge within a strongly connected component as
   computed by ipa_reduced_postorder.  */

bool
ipa_edge_within_scc (struct cgraph_edge *cs)
{
  struct ipa_dfs_info *caller_dfs = (struct ipa_dfs_info *) cs->caller->aux;
  struct ipa_dfs_info *callee_dfs;
  struct cgraph_node *callee = cs->callee->function_symbol ();

  callee_dfs = (struct ipa_dfs_info *) callee->aux;
  return (caller_dfs
          && callee_dfs
          && caller_dfs->scc_no == callee_dfs->scc_no);
}
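
/* Note that this relies on the scc_no fields filled in by a prior call to
   ipa_reduced_postorder; nodes that were excluded from that walk have a
   NULL aux pointer and the predicate conservatively returns false.  */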

struct postorder_stack
{
  struct cgraph_node *node;
  struct cgraph_edge *edge;
  int ref;
};

/* Fill the array ORDER with all nodes with output flag set in reverse
   topological order.  Return the number of elements in the array.

   FIXME: While walking, consider aliases, too.  */
int
ipa_reverse_postorder (struct cgraph_node **order)
{
  struct cgraph_node *node, *node2;
  int stack_size = 0;
  int order_pos = 0;
  struct cgraph_edge *edge;
  int pass;
  struct ipa_ref *ref = NULL;

  struct postorder_stack *stack =
    XCNEWVEC (struct postorder_stack, symtab->cgraph_count);

  /* We have to deal with cycles nicely, so use a depth first traversal
     output algorithm.  Ignore the fact that some functions won't need
     to be output and put them into order as well, so we get dependencies
     right through inline functions.  */
  FOR_EACH_FUNCTION (node)
    node->aux = NULL;
  /* Pass 0 seeds the traversal only from a subset of root-like functions;
     pass 1 then visits whatever remains, e.g. aliases, thunks, address-taken
     functions and nodes reachable only through cycles.  */
  for (pass = 0; pass < 2; pass++)
    FOR_EACH_FUNCTION (node)
      if (!node->aux
          && (pass
              || (!node->address_taken
                  && !node->global.inlined_to
                  && !node->alias && !node->thunk.thunk_p
                  && !node->only_called_directly_p ())))
        {
          stack_size = 0;
          stack[stack_size].node = node;
          stack[stack_size].edge = node->callers;
          stack[stack_size].ref = 0;
          node->aux = (void *)(size_t)1;
          while (stack_size >= 0)
            {
              while (true)
                {
                  node2 = NULL;
                  while (stack[stack_size].edge && !node2)
                    {
                      edge = stack[stack_size].edge;
                      node2 = edge->caller;
                      stack[stack_size].edge = edge->next_caller;
                      /* Break possible cycles involving always-inline
                         functions by ignoring edges from always-inline
                         functions to non-always-inline functions.  */
                      if (DECL_DISREGARD_INLINE_LIMITS (edge->caller->decl)
                          && !DECL_DISREGARD_INLINE_LIMITS
                                (edge->callee->function_symbol ()->decl))
                        node2 = NULL;
                    }
                  for (; stack[stack_size].node->iterate_referring (
                           stack[stack_size].ref, ref) && !node2;
                       stack[stack_size].ref++)
                    {
                      if (ref->use == IPA_REF_ALIAS)
                        node2 = dyn_cast <cgraph_node *> (ref->referring);
                    }
                  if (!node2)
                    break;
                  if (!node2->aux)
                    {
                      stack[++stack_size].node = node2;
                      stack[stack_size].edge = node2->callers;
                      stack[stack_size].ref = 0;
                      node2->aux = (void *)(size_t)1;
                    }
                }
              order[order_pos++] = stack[stack_size--].node;
            }
        }
  free (stack);
  FOR_EACH_FUNCTION (node)
    node->aux = NULL;

  return order_pos;
}
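
/* Sketch of the usual call pattern (expand_node is a hypothetical consumer;
   the caller owns and frees ORDER):

     struct cgraph_node **order
       = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);
     int n = ipa_reverse_postorder (order);
     for (int i = 0; i < n; i++)
       expand_node (order[i]);
     free (order);
*/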

/* Given a memory reference T, will return the variable at the bottom
   of the access.  Unlike get_base_address, this will recurse through
   INDIRECT_REFS.  */

tree
get_base_var (tree t)
{
  while (!SSA_VAR_P (t)
         && (!CONSTANT_CLASS_P (t))
         && TREE_CODE (t) != LABEL_DECL
         && TREE_CODE (t) != FUNCTION_DECL
         && TREE_CODE (t) != CONST_DECL
         && TREE_CODE (t) != CONSTRUCTOR)
    {
      t = TREE_OPERAND (t, 0);
    }
  return t;
}
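
/* For instance, for a reference like a.b[i] (an ARRAY_REF of a
   COMPONENT_REF) the loop keeps taking operand 0 until it reaches the
   VAR_DECL for 'a'; for *p it walks through the dereference down to the
   pointer 'p'.  The exact tree shapes here are illustrative and depend on
   the front end.  */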

/* SRC and DST are going to be merged.  Take SRC's profile and merge it into
   DST so it is not going to be lost.  Possibly destroy SRC's body on the way
   unless PRESERVE_BODY is set.  */

void
ipa_merge_profiles (struct cgraph_node *dst,
                    struct cgraph_node *src,
                    bool preserve_body)
{
  tree oldsrcdecl = src->decl;
  struct function *srccfun, *dstcfun;
  bool match = true;

  if (!src->definition
      || !dst->definition)
    return;
  if (src->frequency < dst->frequency)
    src->frequency = dst->frequency;

  /* Time profiles are merged.  */
  if (dst->tp_first_run > src->tp_first_run && src->tp_first_run)
    dst->tp_first_run = src->tp_first_run;

  if (src->profile_id && !dst->profile_id)
    dst->profile_id = src->profile_id;

  if (!dst->count)
    return;
  if (symtab->dump_file)
    {
      fprintf (symtab->dump_file, "Merging profiles of %s/%i to %s/%i\n",
               xstrdup_for_dump (src->name ()), src->order,
               xstrdup_for_dump (dst->name ()), dst->order);
    }
  dst->count += src->count;

  /* This is ugly.  We need to get both function bodies into memory.
     If the declaration is merged, we need to duplicate it to be able
     to load the body that is being replaced.  This makes the symbol table
     temporarily inconsistent.  */
  if (src->decl == dst->decl)
    {
      struct lto_in_decl_state temp;
      struct lto_in_decl_state *state;

      /* We are going to move the decl; we want to remove its file decl data
         and link these with the new decl.  */
      temp.fn_decl = src->decl;
      lto_in_decl_state **slot
        = src->lto_file_data->function_decl_states->find_slot (&temp,
                                                               NO_INSERT);
      state = *slot;
      src->lto_file_data->function_decl_states->clear_slot (slot);
      gcc_assert (state);

      /* Duplicate the decl and be sure it does not link into body of DST.  */
      src->decl = copy_node (src->decl);
      DECL_STRUCT_FUNCTION (src->decl) = NULL;
      DECL_ARGUMENTS (src->decl) = NULL;
      DECL_INITIAL (src->decl) = NULL;
      DECL_RESULT (src->decl) = NULL;

      /* Associate the decl state with the new declaration, so the LTO
         streamer can look it up.  */
      state->fn_decl = src->decl;
      slot
        = src->lto_file_data->function_decl_states->find_slot (state, INSERT);
      gcc_assert (!*slot);
      *slot = state;
    }

  src->get_untransformed_body ();
  dst->get_untransformed_body ();
  srccfun = DECL_STRUCT_FUNCTION (src->decl);
  dstcfun = DECL_STRUCT_FUNCTION (dst->decl);
  if (n_basic_blocks_for_fn (srccfun)
      != n_basic_blocks_for_fn (dstcfun))
    {
      if (symtab->dump_file)
        fprintf (symtab->dump_file,
                 "Giving up; number of basic blocks mismatch.\n");
      match = false;
    }
  else if (last_basic_block_for_fn (srccfun)
           != last_basic_block_for_fn (dstcfun))
    {
      if (symtab->dump_file)
        fprintf (symtab->dump_file,
                 "Giving up; last block mismatch.\n");
      match = false;
    }
  else
    {
      basic_block srcbb, dstbb;

      FOR_ALL_BB_FN (srcbb, srccfun)
        {
          unsigned int i;

          dstbb = BASIC_BLOCK_FOR_FN (dstcfun, srcbb->index);
          if (dstbb == NULL)
            {
              if (symtab->dump_file)
                fprintf (symtab->dump_file,
                         "No matching block for bb %i.\n",
                         srcbb->index);
              match = false;
              break;
            }
          if (EDGE_COUNT (srcbb->succs) != EDGE_COUNT (dstbb->succs))
            {
              if (symtab->dump_file)
                fprintf (symtab->dump_file,
                         "Edge count mismatch for bb %i.\n",
                         srcbb->index);
              match = false;
              break;
            }
          for (i = 0; i < EDGE_COUNT (srcbb->succs); i++)
            {
              edge srce = EDGE_SUCC (srcbb, i);
              edge dste = EDGE_SUCC (dstbb, i);
              if (srce->dest->index != dste->dest->index)
                {
                  if (symtab->dump_file)
                    fprintf (symtab->dump_file,
                             "Succ edge mismatch for bb %i.\n",
                             srce->dest->index);
                  match = false;
                  break;
                }
            }
        }
    }
  if (match)
    {
      struct cgraph_edge *e, *e2;
      basic_block srcbb, dstbb;

      /* TODO: merge also statement histograms.  */
      FOR_ALL_BB_FN (srcbb, srccfun)
        {
          unsigned int i;

          dstbb = BASIC_BLOCK_FOR_FN (dstcfun, srcbb->index);
          dstbb->count += srcbb->count;
          for (i = 0; i < EDGE_COUNT (srcbb->succs); i++)
            {
              edge srce = EDGE_SUCC (srcbb, i);
              edge dste = EDGE_SUCC (dstbb, i);
              dste->count += srce->count;
            }
        }
      push_cfun (dstcfun);
      counts_to_freqs ();
      compute_function_frequency ();
      pop_cfun ();
      for (e = dst->callees; e; e = e->next_callee)
        {
          if (e->speculative)
            continue;
          e->count = gimple_bb (e->call_stmt)->count;
          e->frequency = compute_call_stmt_bb_frequency
                            (dst->decl,
                             gimple_bb (e->call_stmt));
        }
      for (e = dst->indirect_calls, e2 = src->indirect_calls; e;
           e2 = (e2 ? e2->next_callee : NULL), e = e->next_callee)
        {
          gcov_type count = gimple_bb (e->call_stmt)->count;
          int freq = compute_call_stmt_bb_frequency
                        (dst->decl,
                         gimple_bb (e->call_stmt));
          /* When the call is speculative, we need to re-distribute
             probabilities the same way as they were.  This is not really
             correct because in the other copy the speculation may differ;
             but it is probably not worth the effort.  */
          if (e->speculative)
            {
              cgraph_edge *direct, *indirect;
              cgraph_edge *direct2 = NULL, *indirect2 = NULL;
              ipa_ref *ref;

              e->speculative_call_info (direct, indirect, ref);
              gcc_assert (e == indirect);
              if (e2 && e2->speculative)
                e2->speculative_call_info (direct2, indirect2, ref);
              if (indirect->count || direct->count)
                {
                  /* We should have found a mismatch earlier if there is no
                     matching indirect edge.  */
                  if (!e2)
                    {
                      if (dump_file)
                        fprintf (dump_file,
                                 "Mismatch in merging indirect edges\n");
                    }
                  else if (!e2->speculative)
                    indirect->count += e2->count;
                  else if (e2->speculative)
                    {
                      if (DECL_ASSEMBLER_NAME (direct2->callee->decl)
                          != DECL_ASSEMBLER_NAME (direct->callee->decl))
                        {
                          if (direct2->count >= direct->count)
                            {
                              direct->redirect_callee (direct2->callee);
                              indirect->count += indirect2->count
                                                 + direct->count;
                              direct->count = direct2->count;
                            }
                          else
                            indirect->count += indirect2->count
                                               + direct2->count;
                        }
                      else
                        {
                          direct->count += direct2->count;
                          indirect->count += indirect2->count;
                        }
                    }
                  int prob = RDIV (direct->count * REG_BR_PROB_BASE,
                                   direct->count + indirect->count);
                  direct->frequency = RDIV (freq * prob, REG_BR_PROB_BASE);
                  indirect->frequency = RDIV (freq * (REG_BR_PROB_BASE - prob),
                                              REG_BR_PROB_BASE);
                }
              else
                /* At the moment we should have only profile feedback based
                   speculations when merging.  */
                gcc_unreachable ();
            }
          else if (e2 && e2->speculative)
            {
              cgraph_edge *direct, *indirect;
              ipa_ref *ref;

              e2->speculative_call_info (direct, indirect, ref);
              e->count = count;
              e->frequency = freq;
              int prob = RDIV (direct->count * REG_BR_PROB_BASE, e->count);
              e->make_speculative (direct->callee, direct->count,
                                   RDIV (freq * prob, REG_BR_PROB_BASE));
            }
          else
            {
              e->count = count;
              e->frequency = freq;
            }
        }
      if (!preserve_body)
        src->release_body ();
      inline_update_overall_summary (dst);
    }
  /* TODO: if there is no match, we can scale up.  */
  src->decl = oldsrcdecl;
}
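
/* ipa_merge_profiles is invoked when two function bodies are being unified,
   for instance by the identical code folding pass (ipa-icf), so that the
   surviving body keeps the combined execution counts.  */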

/* Return true if a call to DEST is known to be a self-recursive call
   within FUNC.  */

bool
recursive_call_p (tree func, tree dest)
{
  struct cgraph_node *dest_node = cgraph_node::get_create (dest);
  struct cgraph_node *cnode = cgraph_node::get_create (func);

  return dest_node->semantically_equivalent_p (cnode);
}
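
/* Because the check goes through semantically_equivalent_p, a call made
   through an alias of FUNC from within FUNC's own body is also treated as
   self-recursive, not just a direct call to FUNC itself.  */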