cfgloopanal.c

/* Natural loop analysis code for GNU compiler.
   Copyright (C) 2002-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "hard-reg-set.h"
#include "obstack.h"
#include "predict.h"
#include "vec.h"
#include "hashtab.h"
#include "hash-set.h"
#include "machmode.h"
#include "input.h"
#include "function.h"
#include "dominance.h"
#include "cfg.h"
#include "basic-block.h"
#include "cfgloop.h"
#include "symtab.h"
#include "flags.h"
#include "statistics.h"
#include "double-int.h"
#include "real.h"
#include "fixed-value.h"
#include "alias.h"
#include "wide-int.h"
#include "inchash.h"
#include "tree.h"
#include "insn-config.h"
#include "expmed.h"
#include "dojump.h"
#include "explow.h"
#include "calls.h"
#include "emit-rtl.h"
#include "varasm.h"
#include "stmt.h"
#include "expr.h"
#include "graphds.h"
#include "params.h"

struct target_cfgloop default_target_cfgloop;
#if SWITCHABLE_TARGET
struct target_cfgloop *this_target_cfgloop = &default_target_cfgloop;
#endif

/* Checks whether BB is executed exactly once in each LOOP iteration.  */
bool
just_once_each_iteration_p (const struct loop *loop, const_basic_block bb)
{
  /* It must be executed at least once each iteration.  */
  if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
    return false;

  /* And just once.  */
  if (bb->loop_father != loop)
    return false;

  /* But this was not enough.  We might have some irreducible loop here.  */
  if (bb->flags & BB_IRREDUCIBLE_LOOP)
    return false;

  return true;
}

/* Marks blocks and edges that are part of non-recognized loops; i.e. we
   throw away all latch edges and mark blocks inside any remaining cycle.
   Everything is a bit complicated due to the fact that we do not want to
   do this for parts of cycles that only "pass" through some loop -- i.e.
   for each cycle, we want to mark the blocks that belong directly to the
   innermost loop containing the whole cycle.

   LOOPS is the loop tree.  */

#define LOOP_REPR(LOOP) ((LOOP)->num + last_basic_block_for_fn (cfun))
#define BB_REPR(BB) ((BB)->index + 1)
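
/* For illustration (hypothetical numbers): if
   last_basic_block_for_fn (cfun) == 10, basic block 3 becomes vertex
   BB_REPR (bb) == 4, while loop number 2 becomes vertex
   LOOP_REPR (loop) == 12, so block and loop representatives never
   collide in the graph built below.  */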

bool
mark_irreducible_loops (void)
{
  basic_block act;
  struct graph_edge *ge;
  edge e;
  edge_iterator ei;
  int src, dest;
  unsigned depth;
  struct graph *g;
  int num = number_of_loops (cfun);
  struct loop *cloop;
  bool irred_loop_found = false;
  int i;

  gcc_assert (current_loops != NULL);

  /* Reset the flags.  */
  FOR_BB_BETWEEN (act, ENTRY_BLOCK_PTR_FOR_FN (cfun),
		  EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
    {
      act->flags &= ~BB_IRREDUCIBLE_LOOP;
      FOR_EACH_EDGE (e, ei, act->succs)
	e->flags &= ~EDGE_IRREDUCIBLE_LOOP;
    }

  /* Create the edge lists.  */
  g = new_graph (last_basic_block_for_fn (cfun) + num);

  FOR_BB_BETWEEN (act, ENTRY_BLOCK_PTR_FOR_FN (cfun),
		  EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
    FOR_EACH_EDGE (e, ei, act->succs)
      {
	/* Ignore edges to exit.  */
	if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
	  continue;

	src = BB_REPR (act);
	dest = BB_REPR (e->dest);

	/* Ignore latch edges.  */
	if (e->dest->loop_father->header == e->dest
	    && e->dest->loop_father->latch == act)
	  continue;

	/* Edges inside a single loop should be left where they are.  Edges
	   to subloop headers should lead to the representative of the
	   subloop, but from the same place.

	   Edges exiting loops should lead from the representative of the
	   child of the nearest common ancestor of the loops in which ACT
	   lies.  */

	if (e->dest->loop_father->header == e->dest)
	  dest = LOOP_REPR (e->dest->loop_father);

	if (!flow_bb_inside_loop_p (act->loop_father, e->dest))
	  {
	    depth = 1 + loop_depth (find_common_loop (act->loop_father,
						      e->dest->loop_father));
	    if (depth == loop_depth (act->loop_father))
	      cloop = act->loop_father;
	    else
	      cloop = (*act->loop_father->superloops)[depth];

	    src = LOOP_REPR (cloop);
	  }

	add_edge (g, src, dest)->data = e;
      }

  /* Find the strongly connected components.  */
  graphds_scc (g, NULL);

  /* Mark the irreducible loops.  */
  for (i = 0; i < g->n_vertices; i++)
    for (ge = g->vertices[i].succ; ge; ge = ge->succ_next)
      {
	edge real = (edge) ge->data;
	/* An edge in graph G is irreducible if it connects two vertices in
	   the same scc.  */

	/* All edges should lead from a component with a higher number to a
	   component with a lower one.  */
	gcc_assert (g->vertices[ge->src].component
		    >= g->vertices[ge->dest].component);

	if (g->vertices[ge->src].component != g->vertices[ge->dest].component)
	  continue;

	real->flags |= EDGE_IRREDUCIBLE_LOOP;
	irred_loop_found = true;
	if (flow_bb_inside_loop_p (real->src->loop_father, real->dest))
	  real->src->flags |= BB_IRREDUCIBLE_LOOP;
      }

  free_graph (g);

  loops_state_set (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS);
  return irred_loop_found;
}
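
/* Usage sketch (hypothetical caller; loop_optimizer_init normally
   requests this marking when LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS is
   among its flags):

     if (mark_irreducible_loops ())
       ;  // Some cycle is not covered by the natural loop tree; passes
	  // must test BB_IRREDUCIBLE_LOOP / EDGE_IRREDUCIBLE_LOOP before
	  // transforming the blocks and edges involved.  */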

/* Counts number of insns inside LOOP.  */
int
num_loop_insns (const struct loop *loop)
{
  basic_block *bbs, bb;
  unsigned i, ninsns = 0;
  rtx_insn *insn;

  bbs = get_loop_body (loop);
  for (i = 0; i < loop->num_nodes; i++)
    {
      bb = bbs[i];
      FOR_BB_INSNS (bb, insn)
	if (NONDEBUG_INSN_P (insn))
	  ninsns++;
    }
  free (bbs);

  if (!ninsns)
    ninsns = 1; /* To avoid division by zero.  */

  return ninsns;
}

/* Counts number of insns executed on average per iteration of LOOP.  */
int
average_num_loop_insns (const struct loop *loop)
{
  basic_block *bbs, bb;
  unsigned i, binsns, ninsns, ratio;
  rtx_insn *insn;

  ninsns = 0;
  bbs = get_loop_body (loop);
  for (i = 0; i < loop->num_nodes; i++)
    {
      bb = bbs[i];

      binsns = 0;
      FOR_BB_INSNS (bb, insn)
	if (NONDEBUG_INSN_P (insn))
	  binsns++;

      ratio = loop->header->frequency == 0
	      ? BB_FREQ_MAX
	      : (bb->frequency * BB_FREQ_MAX) / loop->header->frequency;
      ninsns += binsns * ratio;
    }
  free (bbs);

  ninsns /= BB_FREQ_MAX;
  if (!ninsns)
    ninsns = 1; /* To avoid division by zero.  */

  return ninsns;
}
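
/* Worked example (hypothetical numbers): a block whose frequency is half
   the header's gets ratio == BB_FREQ_MAX / 2, so 10 insns in that block
   contribute 10 * BB_FREQ_MAX / 2 to the scaled sum; dividing by
   BB_FREQ_MAX at the end credits it with 5 insns per average
   iteration.  */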

/* Returns expected number of iterations of LOOP, according to
   measured or guessed profile.  No bounding is done on the
   value.  */
gcov_type
expected_loop_iterations_unbounded (const struct loop *loop)
{
  edge e;
  edge_iterator ei;

  if (loop->latch->count || loop->header->count)
    {
      gcov_type count_in, count_latch, expected;

      count_in = 0;
      count_latch = 0;

      FOR_EACH_EDGE (e, ei, loop->header->preds)
	if (e->src == loop->latch)
	  count_latch = e->count;
	else
	  count_in += e->count;

      if (count_in == 0)
	expected = count_latch * 2;
      else
	expected = (count_latch + count_in - 1) / count_in;

      return expected;
    }
  else
    {
      int freq_in, freq_latch;

      freq_in = 0;
      freq_latch = 0;

      FOR_EACH_EDGE (e, ei, loop->header->preds)
	if (e->src == loop->latch)
	  freq_latch = EDGE_FREQUENCY (e);
	else
	  freq_in += EDGE_FREQUENCY (e);

      if (freq_in == 0)
	return freq_latch * 2;

      return (freq_latch + freq_in - 1) / freq_in;
    }
}
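
/* For illustration (hypothetical counts): with count_latch == 9 and
   count_in == 3 the formula gives (9 + 3 - 1) / 3 == 3, i.e. roughly
   ceil (count_latch / count_in) latch traversals per entry into the
   loop; when no entry counts were recorded, the latch count is simply
   doubled as a guess.  */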

/* Returns expected number of LOOP iterations.  The returned value is
   bounded by REG_BR_PROB_BASE.  */
unsigned
expected_loop_iterations (const struct loop *loop)
{
  gcov_type expected = expected_loop_iterations_unbounded (loop);
  return (expected > REG_BR_PROB_BASE ? REG_BR_PROB_BASE : expected);
}
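
/* Usage sketch (hypothetical caller): dump a bounded estimate for every
   loop once the loop tree has been built:

     struct loop *loop;
     FOR_EACH_LOOP (loop, 0)
       fprintf (dump_file, "loop %d: ~%u iterations\n",
		loop->num, expected_loop_iterations (loop));  */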

/* Returns the maximum level of nesting of subloops of LOOP.  */
unsigned
get_loop_level (const struct loop *loop)
{
  const struct loop *ploop;
  unsigned mx = 0, l;

  for (ploop = loop->inner; ploop; ploop = ploop->next)
    {
      l = get_loop_level (ploop);
      if (l >= mx)
	mx = l + 1;
    }
  return mx;
}
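
/* For illustration: a loop with no subloops has level 0, a loop with one
   childless subloop has level 1, and a chain loop -> inner -> inner has
   level 2; each level of nesting adds one.  */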

/* Initialize the constants for computing set costs.  */
void
init_set_costs (void)
{
  int speed;
  rtx_insn *seq;
  rtx reg1 = gen_raw_REG (SImode, FIRST_PSEUDO_REGISTER);
  rtx reg2 = gen_raw_REG (SImode, FIRST_PSEUDO_REGISTER + 1);
  rtx addr = gen_raw_REG (Pmode, FIRST_PSEUDO_REGISTER + 2);
  rtx mem = validize_mem (gen_rtx_MEM (SImode, addr));
  unsigned i;

  target_avail_regs = 0;
  target_clobbered_regs = 0;
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    if (TEST_HARD_REG_BIT (reg_class_contents[GENERAL_REGS], i)
	&& !fixed_regs[i])
      {
	target_avail_regs++;
	if (call_used_regs[i])
	  target_clobbered_regs++;
      }

  target_res_regs = 3;

  for (speed = 0; speed < 2; speed++)
    {
      crtl->maybe_hot_insn_p = speed;
      /* Set up the costs for using extra registers:

	 1) If not many free registers remain, we should prefer having an
	    additional move to decreasing the number of available registers.
	    (TARGET_REG_COST).
	 2) If no registers are available, we need to spill, which may require
	    storing the old value to memory and loading it back
	    (TARGET_SPILL_COST).  */

      start_sequence ();
      emit_move_insn (reg1, reg2);
      seq = get_insns ();
      end_sequence ();
      target_reg_cost [speed] = seq_cost (seq, speed);

      start_sequence ();
      emit_move_insn (mem, reg1);
      emit_move_insn (reg2, mem);
      seq = get_insns ();
      end_sequence ();
      target_spill_cost [speed] = seq_cost (seq, speed);
    }
  default_rtl_profile ();
}
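
/* For illustration: after the loop above, target_reg_cost[speed] holds
   the cost of a single register-to-register move and
   target_spill_cost[speed] the cost of a store to memory plus a load
   back, each measured for both the size (speed == 0) and speed
   (speed == 1) profiles; estimate_reg_pressure_cost below chooses
   between the two.  */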

/* Estimates the cost of increased register pressure caused by making N_NEW
   new registers live around the loop.  N_OLD is the number of registers
   live around the loop.  If CALL_P is true, also take into account that
   call-used registers may be clobbered in the loop body, reducing the
   number of available registers before we spill.  */
unsigned
estimate_reg_pressure_cost (unsigned n_new, unsigned n_old, bool speed,
			    bool call_p)
{
  unsigned cost;
  unsigned regs_needed = n_new + n_old;
  unsigned available_regs = target_avail_regs;

  /* If there is a call in the loop body, the call-clobbered registers
     are not available for loop invariants.  */
  if (call_p)
    available_regs = available_regs - target_clobbered_regs;

  /* If we have enough registers, we should use them and not restrict
     the transformations unnecessarily.  */
  if (regs_needed + target_res_regs <= available_regs)
    return 0;

  if (regs_needed <= available_regs)
    /* If we are close to running out of registers, try to preserve
       them.  */
    cost = target_reg_cost [speed] * n_new;
  else
    /* If we run out of registers, it is very expensive to add another
       one.  */
    cost = target_spill_cost [speed] * n_new;

  if (optimize && (flag_ira_region == IRA_REGION_ALL
		   || flag_ira_region == IRA_REGION_MIXED)
      && number_of_loops (cfun) <= (unsigned) IRA_MAX_LOOPS_NUM)
    /* IRA regional allocation deals with high register pressure
       better.  So decrease the cost (to make the cost calculation more
       accurate for IRA, we would need to know how many registers live
       through the loop transparently).  */
    cost /= 2;

  return cost;
}
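
/* Worked example (hypothetical target numbers): with
   target_avail_regs == 8, target_res_regs == 3, n_old == 4 and
   n_new == 2, regs_needed == 6; since 6 + 3 > 8 the pressure is not
   free, but 6 <= 8, so the estimate is target_reg_cost[speed] * 2
   rather than the much larger spill cost.  */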

/* Sets EDGE_LOOP_EXIT flag for all loop exits.  */
void
mark_loop_exit_edges (void)
{
  basic_block bb;
  edge e;

  if (number_of_loops (cfun) <= 1)
    return;

  FOR_EACH_BB_FN (bb, cfun)
    {
      edge_iterator ei;

      FOR_EACH_EDGE (e, ei, bb->succs)
	{
	  if (loop_outer (bb->loop_father)
	      && loop_exit_edge_p (bb->loop_father, e))
	    e->flags |= EDGE_LOOP_EXIT;
	  else
	    e->flags &= ~EDGE_LOOP_EXIT;
	}
    }
}

/* Return the exit edge if the loop has only one exit that is likely
   to be executed at runtime (i.e. it is not an EH edge or an edge
   leading to a noreturn call).  */
edge
single_likely_exit (struct loop *loop)
{
  edge found = single_exit (loop);
  vec<edge> exits;
  unsigned i;
  edge ex;

  if (found)
    return found;
  exits = get_loop_exit_edges (loop);
  FOR_EACH_VEC_ELT (exits, i, ex)
    {
      if (ex->flags & (EDGE_EH | EDGE_ABNORMAL_CALL))
	continue;
      /* The constant of 5 is set in a way so noreturn calls are
	 ruled out by this test.  The static branch prediction algorithm
	 will not assign such a low probability to conditionals for usual
	 reasons.  */
      if (profile_status_for_fn (cfun) != PROFILE_ABSENT
	  && ex->probability < 5 && !ex->count)
	continue;
      if (!found)
	found = ex;
      else
	{
	  exits.release ();
	  return NULL;
	}
    }
  exits.release ();
  return found;
}
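
/* Usage sketch (hypothetical caller): transformations that need a unique
   run-time exit can guard on the result:

     edge exit = single_likely_exit (loop);
     if (exit)
       ;  // Exactly one exit is expected to fire at run time; EH edges
	  // and edges the predictor marks as nearly never taken (such as
	  // paths to noreturn calls) were ignored.  */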

/* Returns the hot path of LOOP, i.e. the blocks visited when, starting
   from the header, we repeatedly follow the most probable successor edge
   that stays inside LOOP and leads to a block not visited yet.  The
   header is the 0-th block of the returned vector.  */
vec<basic_block>
get_loop_hot_path (const struct loop *loop)
{
  basic_block bb = loop->header;
  vec<basic_block> path = vNULL;
  bitmap visited = BITMAP_ALLOC (NULL);

  while (true)
    {
      edge_iterator ei;
      edge e;
      edge best = NULL;

      path.safe_push (bb);
      bitmap_set_bit (visited, bb->index);
      FOR_EACH_EDGE (e, ei, bb->succs)
	if ((!best || e->probability > best->probability)
	    && !loop_exit_edge_p (loop, e)
	    && !bitmap_bit_p (visited, e->dest->index))
	  best = e;
      if (!best || best->dest == loop->header)
	break;
      bb = best->dest;
    }
  BITMAP_FREE (visited);
  return path;
}
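
/* Example (hypothetical caller): walk the hot path and inspect its
   blocks, releasing the vector afterwards:

     vec<basic_block> path = get_loop_hot_path (loop);
     basic_block bb;
     unsigned i;
     FOR_EACH_VEC_ELT (path, i, bb)
       ;  // bb is the next block on the most probable in-loop path.
     path.release ();  */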