/* Read the GIMPLE representation from a file stream.

   Copyright (C) 2009-2015 Free Software Foundation, Inc.
   Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>
   Re-implemented by Diego Novillo <dnovillo@google.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "toplev.h"
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
#include "double-int.h"
#include "input.h"
#include "alias.h"
#include "symtab.h"
#include "wide-int.h"
#include "inchash.h"
#include "tree.h"
#include "fold-const.h"
#include "stringpool.h"
#include "hashtab.h"
#include "hard-reg-set.h"
#include "function.h"
#include "rtl.h"
#include "flags.h"
#include "statistics.h"
#include "real.h"
#include "fixed-value.h"
#include "insn-config.h"
#include "expmed.h"
#include "dojump.h"
#include "explow.h"
#include "calls.h"
#include "emit-rtl.h"
#include "varasm.h"
#include "stmt.h"
#include "expr.h"
#include "params.h"
#include "predict.h"
#include "dominance.h"
#include "cfg.h"
#include "basic-block.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "tree-cfg.h"
#include "tree-ssanames.h"
#include "tree-into-ssa.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "tree-pass.h"
#include "diagnostic.h"
#include "except.h"
#include "debug.h"
#include "hash-map.h"
#include "plugin-api.h"
#include "ipa-ref.h"
#include "cgraph.h"
#include "ipa-utils.h"
#include "data-streamer.h"
#include "gimple-streamer.h"
#include "lto-streamer.h"
#include "tree-streamer.h"
#include "streamer-hooks.h"
#include "cfgloop.h"

struct freeing_string_slot_hasher : string_slot_hasher
{
  static inline void remove (value_type *);
};

inline void
freeing_string_slot_hasher::remove (value_type *v)
{
  free (v);
}

/* The table to hold the file names.  */
static hash_table<freeing_string_slot_hasher> *file_name_hash_table;


/* Check that tag ACTUAL has one of the given values.  NUM_TAGS is the
   number of valid tag values to check.  */

void
lto_tag_check_set (enum LTO_tags actual, int ntags, ...)
{
  va_list ap;
  int i;

  va_start (ap, ntags);
  for (i = 0; i < ntags; i++)
    if ((unsigned) actual == va_arg (ap, unsigned))
      {
        va_end (ap);
        return;
      }

  va_end (ap);
  internal_error ("bytecode stream: unexpected tag %s", lto_tag_name (actual));
}


/* Read LENGTH bytes from STREAM to ADDR.  */

void
lto_input_data_block (struct lto_input_block *ib, void *addr, size_t length)
{
  size_t i;
  unsigned char *const buffer = (unsigned char *const) addr;

  for (i = 0; i < length; i++)
    buffer[i] = streamer_read_uchar (ib);
}


/* Lookup STRING in file_name_hash_table.  If found, return the existing
   string, otherwise insert STRING as the canonical version.  */

static const char *
canon_file_name (const char *string)
{
  string_slot **slot;
  struct string_slot s_slot;
  size_t len = strlen (string);

  s_slot.s = string;
  s_slot.len = len;

  slot = file_name_hash_table->find_slot (&s_slot, INSERT);
  if (*slot == NULL)
    {
      char *saved_string;
      struct string_slot *new_slot;

      saved_string = (char *) xmalloc (len + 1);
      new_slot = XCNEW (struct string_slot);
      memcpy (saved_string, string, len + 1);
      new_slot->s = saved_string;
      new_slot->len = len;
      *slot = new_slot;
      return saved_string;
    }
  else
    {
      struct string_slot *old_slot = *slot;
      return old_slot->s;
    }
}
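
/* Note: because canon_file_name interns every file name, two equal file
   names always share the same pointer.  The location cache below takes
   advantage of this and compares file names by pointer where possible
   (see cmp_loc and input_location).  */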

/* Pointer to currently alive instance of lto_location_cache.  */

lto_location_cache *lto_location_cache::current_cache;

/* Sort locations in source order.  Start with file from last application.  */

int
lto_location_cache::cmp_loc (const void *pa, const void *pb)
{
  const cached_location *a = ((const cached_location *)pa);
  const cached_location *b = ((const cached_location *)pb);
  const char *current_file = current_cache->current_file;
  int current_line = current_cache->current_line;

  if (a->file == current_file && b->file != current_file)
    return -1;
  if (a->file != current_file && b->file == current_file)
    return 1;
  if (a->file == current_file && b->file == current_file)
    {
      if (a->line == current_line && b->line != current_line)
        return -1;
      if (a->line != current_line && b->line == current_line)
        return 1;
    }
  if (a->file != b->file)
    return strcmp (a->file, b->file);
  if (a->line != b->line)
    return a->line - b->line;
  return a->col - b->col;
}

/* Apply all changes in location cache.  Add locations into linemap and patch
   trees.  */

bool
lto_location_cache::apply_location_cache ()
{
  static const char *prev_file;
  if (!loc_cache.length ())
    return false;
  if (loc_cache.length () > 1)
    loc_cache.qsort (cmp_loc);

  for (unsigned int i = 0; i < loc_cache.length (); i++)
    {
      struct cached_location loc = loc_cache[i];

      if (current_file != loc.file)
        linemap_add (line_table, prev_file ? LC_RENAME : LC_ENTER,
                     false, loc.file, loc.line);
      else if (current_line != loc.line)
        {
          int max = loc.col;

          for (unsigned int j = i + 1; j < loc_cache.length (); j++)
            if (loc.file != loc_cache[j].file
                || loc.line != loc_cache[j].line)
              break;
            else if (max < loc_cache[j].col)
              max = loc_cache[j].col;
          linemap_line_start (line_table, loc.line, max + 1);
        }
      gcc_assert (*loc.loc == BUILTINS_LOCATION + 1);
      if (current_file == loc.file && current_line == loc.line
          && current_col == loc.col)
        *loc.loc = current_loc;
      else
        current_loc = *loc.loc = linemap_position_for_column (line_table,
                                                              loc.col);
      current_line = loc.line;
      prev_file = current_file = loc.file;
      current_col = loc.col;
    }
  loc_cache.truncate (0);
  accepted_length = 0;
  return true;
}
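
/* Informal summary of the location cache protocol, inferred from the code
   around it: input_location records each streamed location in loc_cache
   instead of creating a location_t immediately, so that apply_location_cache
   can sort the pending entries into source order before entering them into
   the line table and patching the cached location_t slots.
   accept_location_cache and revert_location_cache let the tree-merging
   machinery keep or discard the entries added since the last accept,
   depending on whether the freshly read trees were kept (merging failed)
   or replaced by already existing ones (merging succeeded).  */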

/* Tree merging did not succeed; mark all changes in the cache as accepted.  */

void
lto_location_cache::accept_location_cache ()
{
  gcc_assert (current_cache == this);
  accepted_length = loc_cache.length ();
}

/* Tree merging did succeed; throw away recent changes.  */

void
lto_location_cache::revert_location_cache ()
{
  loc_cache.truncate (accepted_length);
}

/* Read a location bitpack from input block IB and either update *LOC directly
   or add it to the location cache.
   It is necessary to call apply_location_cache to get *LOC updated.  */

void
lto_location_cache::input_location (location_t *loc, struct bitpack_d *bp,
                                    struct data_in *data_in)
{
  static const char *stream_file;
  static int stream_line;
  static int stream_col;
  bool file_change, line_change, column_change;

  gcc_assert (current_cache == this);

  if (bp_unpack_value (bp, 1))
    {
      *loc = UNKNOWN_LOCATION;
      return;
    }
  *loc = BUILTINS_LOCATION + 1;

  file_change = bp_unpack_value (bp, 1);
  line_change = bp_unpack_value (bp, 1);
  column_change = bp_unpack_value (bp, 1);

  if (file_change)
    stream_file = canon_file_name (bp_unpack_string (data_in, bp));

  if (line_change)
    stream_line = bp_unpack_var_len_unsigned (bp);

  if (column_change)
    stream_col = bp_unpack_var_len_unsigned (bp);

  /* This optimization saves location cache operations during gimple
     streaming.  */
  if (current_file == stream_file && current_line == stream_line
      && current_col == stream_col)
    {
      *loc = current_loc;
      return;
    }

  struct cached_location entry = {stream_file, loc, stream_line, stream_col};
  loc_cache.safe_push (entry);
}

/* Read a location bitpack from input block IB and either update *LOC directly
   or add it to the location cache.
   It is necessary to call apply_location_cache to get *LOC updated.  */

void
lto_input_location (location_t *loc, struct bitpack_d *bp,
                    struct data_in *data_in)
{
  data_in->location_cache.input_location (loc, bp, data_in);
}

/* Read location and return it instead of going through location caching.
   This should be used only when the resulting location is not going to be
   discarded.  */

location_t
stream_input_location_now (struct bitpack_d *bp, struct data_in *data_in)
{
  location_t loc;
  stream_input_location (&loc, bp, data_in);
  data_in->location_cache.apply_location_cache ();
  return loc;
}

/* Read a reference to a tree node from DATA_IN using input block IB.
   TAG is the expected node that should be found in IB.  If TAG belongs
   to one of the indexable trees, expect to read a reference index to
   be looked up in one of the symbol tables, otherwise read the physical
   representation of the tree using stream_read_tree.  FN is the
   function scope for the read tree.  */

tree
lto_input_tree_ref (struct lto_input_block *ib, struct data_in *data_in,
                    struct function *fn, enum LTO_tags tag)
{
  unsigned HOST_WIDE_INT ix_u;
  tree result = NULL_TREE;

  lto_tag_check_range (tag, LTO_field_decl_ref, LTO_namelist_decl_ref);

  switch (tag)
    {
    case LTO_type_ref:
      ix_u = streamer_read_uhwi (ib);
      result = lto_file_decl_data_get_type (data_in->file_data, ix_u);
      break;

    case LTO_ssa_name_ref:
      ix_u = streamer_read_uhwi (ib);
      result = (*SSANAMES (fn))[ix_u];
      break;

    case LTO_field_decl_ref:
      ix_u = streamer_read_uhwi (ib);
      result = lto_file_decl_data_get_field_decl (data_in->file_data, ix_u);
      break;

    case LTO_function_decl_ref:
      ix_u = streamer_read_uhwi (ib);
      result = lto_file_decl_data_get_fn_decl (data_in->file_data, ix_u);
      break;

    case LTO_type_decl_ref:
      ix_u = streamer_read_uhwi (ib);
      result = lto_file_decl_data_get_type_decl (data_in->file_data, ix_u);
      break;

    case LTO_namespace_decl_ref:
      ix_u = streamer_read_uhwi (ib);
      result = lto_file_decl_data_get_namespace_decl (data_in->file_data, ix_u);
      break;

    case LTO_global_decl_ref:
    case LTO_result_decl_ref:
    case LTO_const_decl_ref:
    case LTO_imported_decl_ref:
    case LTO_label_decl_ref:
    case LTO_translation_unit_decl_ref:
    case LTO_namelist_decl_ref:
      ix_u = streamer_read_uhwi (ib);
      result = lto_file_decl_data_get_var_decl (data_in->file_data, ix_u);
      break;

    default:
      gcc_unreachable ();
    }

  gcc_assert (result);

  return result;
}
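
/* Note: the indices consumed above refer to the per-file decl/type tables
   that the writer side set up (see lto-streamer-out.c).  The reader has to
   see the same LTO_*_ref tags in the same order the writer emitted them,
   otherwise the uhwi indices read here would select the wrong entries.  */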

/* Read and return a double-linked list of catch handlers from input
   block IB, using descriptors in DATA_IN.  */

static struct eh_catch_d *
lto_input_eh_catch_list (struct lto_input_block *ib, struct data_in *data_in,
                         eh_catch *last_p)
{
  eh_catch first;
  enum LTO_tags tag;

  *last_p = first = NULL;
  tag = streamer_read_record_start (ib);
  while (tag)
    {
      tree list;
      eh_catch n;

      lto_tag_check_range (tag, LTO_eh_catch, LTO_eh_catch);

      /* Read the catch node.  */
      n = ggc_cleared_alloc<eh_catch_d> ();
      n->type_list = stream_read_tree (ib, data_in);
      n->filter_list = stream_read_tree (ib, data_in);
      n->label = stream_read_tree (ib, data_in);

      /* Register all the types in N->FILTER_LIST.  */
      for (list = n->filter_list; list; list = TREE_CHAIN (list))
        add_type_for_runtime (TREE_VALUE (list));

      /* Chain N to the end of the list.  */
      if (*last_p)
        (*last_p)->next_catch = n;
      n->prev_catch = *last_p;
      *last_p = n;

      /* Set the head of the list the first time through the loop.  */
      if (first == NULL)
        first = n;

      tag = streamer_read_record_start (ib);
    }

  return first;
}


/* Read and return EH region IX from input block IB, using descriptors
   in DATA_IN.  */

static eh_region
input_eh_region (struct lto_input_block *ib, struct data_in *data_in, int ix)
{
  enum LTO_tags tag;
  eh_region r;

  /* Read the region header.  */
  tag = streamer_read_record_start (ib);
  if (tag == LTO_null)
    return NULL;

  r = ggc_cleared_alloc<eh_region_d> ();
  r->index = streamer_read_hwi (ib);

  gcc_assert (r->index == ix);

  /* Read all the region pointers as region numbers.  We'll fix up
     the pointers once the whole array has been read.  */
  r->outer = (eh_region) (intptr_t) streamer_read_hwi (ib);
  r->inner = (eh_region) (intptr_t) streamer_read_hwi (ib);
  r->next_peer = (eh_region) (intptr_t) streamer_read_hwi (ib);

  switch (tag)
    {
    case LTO_ert_cleanup:
      r->type = ERT_CLEANUP;
      break;

    case LTO_ert_try:
      {
        struct eh_catch_d *last_catch;
        r->type = ERT_TRY;
        r->u.eh_try.first_catch = lto_input_eh_catch_list (ib, data_in,
                                                           &last_catch);
        r->u.eh_try.last_catch = last_catch;
        break;
      }

    case LTO_ert_allowed_exceptions:
      {
        tree l;

        r->type = ERT_ALLOWED_EXCEPTIONS;
        r->u.allowed.type_list = stream_read_tree (ib, data_in);
        r->u.allowed.label = stream_read_tree (ib, data_in);
        r->u.allowed.filter = streamer_read_uhwi (ib);

        for (l = r->u.allowed.type_list; l ; l = TREE_CHAIN (l))
          add_type_for_runtime (TREE_VALUE (l));
      }
      break;

    case LTO_ert_must_not_throw:
      {
        r->type = ERT_MUST_NOT_THROW;
        r->u.must_not_throw.failure_decl = stream_read_tree (ib, data_in);
        bitpack_d bp = streamer_read_bitpack (ib);
        r->u.must_not_throw.failure_loc
          = stream_input_location_now (&bp, data_in);
      }
      break;

    default:
      gcc_unreachable ();
    }

  r->landing_pads = (eh_landing_pad) (intptr_t) streamer_read_hwi (ib);

  return r;
}

/* Read and return EH landing pad IX from input block IB, using descriptors
   in DATA_IN.  */

static eh_landing_pad
input_eh_lp (struct lto_input_block *ib, struct data_in *data_in, int ix)
{
  enum LTO_tags tag;
  eh_landing_pad lp;

  /* Read the landing pad header.  */
  tag = streamer_read_record_start (ib);
  if (tag == LTO_null)
    return NULL;

  lto_tag_check_range (tag, LTO_eh_landing_pad, LTO_eh_landing_pad);

  lp = ggc_cleared_alloc<eh_landing_pad_d> ();
  lp->index = streamer_read_hwi (ib);

  gcc_assert (lp->index == ix);

  lp->next_lp = (eh_landing_pad) (intptr_t) streamer_read_hwi (ib);
  lp->region = (eh_region) (intptr_t) streamer_read_hwi (ib);
  lp->post_landing_pad = stream_read_tree (ib, data_in);

  return lp;
}


/* After reading the EH regions, pointers to peer and children regions
   are region numbers.  This converts all these region numbers into
   real pointers into the rematerialized regions for FN.  ROOT_REGION
   is the region number for the root EH region in FN.  */

static void
fixup_eh_region_pointers (struct function *fn, HOST_WIDE_INT root_region)
{
  unsigned i;
  vec<eh_region, va_gc> *eh_array = fn->eh->region_array;
  vec<eh_landing_pad, va_gc> *lp_array = fn->eh->lp_array;
  eh_region r;
  eh_landing_pad lp;

  gcc_assert (eh_array && lp_array);

  gcc_assert (root_region >= 0);
  fn->eh->region_tree = (*eh_array)[root_region];

#define FIXUP_EH_REGION(r) (r) = (*eh_array)[(HOST_WIDE_INT) (intptr_t) (r)]
#define FIXUP_EH_LP(p) (p) = (*lp_array)[(HOST_WIDE_INT) (intptr_t) (p)]

  /* Convert all the index numbers stored in pointer fields into
     pointers to the corresponding slots in the EH region array.  */
  FOR_EACH_VEC_ELT (*eh_array, i, r)
    {
      /* The array may contain NULL regions.  */
      if (r == NULL)
        continue;

      gcc_assert (i == (unsigned) r->index);
      FIXUP_EH_REGION (r->outer);
      FIXUP_EH_REGION (r->inner);
      FIXUP_EH_REGION (r->next_peer);
      FIXUP_EH_LP (r->landing_pads);
    }

  /* Convert all the index numbers stored in pointer fields into
     pointers to the corresponding slots in the EH landing pad array.  */
  FOR_EACH_VEC_ELT (*lp_array, i, lp)
    {
      /* The array may contain NULL landing pads.  */
      if (lp == NULL)
        continue;

      gcc_assert (i == (unsigned) lp->index);
      FIXUP_EH_LP (lp->next_lp);
      FIXUP_EH_REGION (lp->region);
    }

#undef FIXUP_EH_REGION
#undef FIXUP_EH_LP
}
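
/* For example, if input_eh_region stored the value 3 in R->OUTER, then after
   fixup_eh_region_pointers runs R->OUTER points to (*fn->eh->region_array)[3],
   i.e. the region whose index field is 3.  The same scheme applies to landing
   pads via FIXUP_EH_LP.  */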

/* Initialize EH support.  */

void
lto_init_eh (void)
{
  static bool eh_initialized_p = false;

  if (eh_initialized_p)
    return;

  /* Contrary to most other FEs, we only initialize EH support when at
     least one of the files in the set contains exception regions in
     it.  Since this happens much later than the call to init_eh in
     lang_dependent_init, we have to set flag_exceptions and call
     init_eh again to initialize the EH tables.  */
  flag_exceptions = 1;
  init_eh ();

  eh_initialized_p = true;
}


/* Read the exception table for FN from IB using the data descriptors
   in DATA_IN.  */

static void
input_eh_regions (struct lto_input_block *ib, struct data_in *data_in,
                  struct function *fn)
{
  HOST_WIDE_INT i, root_region, len;
  enum LTO_tags tag;

  tag = streamer_read_record_start (ib);
  if (tag == LTO_null)
    return;

  lto_tag_check_range (tag, LTO_eh_table, LTO_eh_table);

  /* If the file contains EH regions, then it was compiled with
     -fexceptions.  In that case, initialize the backend EH
     machinery.  */
  lto_init_eh ();

  gcc_assert (fn->eh);

  root_region = streamer_read_hwi (ib);
  gcc_assert (root_region == (int) root_region);

  /* Read the EH region array.  */
  len = streamer_read_hwi (ib);
  gcc_assert (len == (int) len);
  if (len > 0)
    {
      vec_safe_grow_cleared (fn->eh->region_array, len);
      for (i = 0; i < len; i++)
        {
          eh_region r = input_eh_region (ib, data_in, i);
          (*fn->eh->region_array)[i] = r;
        }
    }

  /* Read the landing pads.  */
  len = streamer_read_hwi (ib);
  gcc_assert (len == (int) len);
  if (len > 0)
    {
      vec_safe_grow_cleared (fn->eh->lp_array, len);
      for (i = 0; i < len; i++)
        {
          eh_landing_pad lp = input_eh_lp (ib, data_in, i);
          (*fn->eh->lp_array)[i] = lp;
        }
    }

  /* Read the runtime type data.  */
  len = streamer_read_hwi (ib);
  gcc_assert (len == (int) len);
  if (len > 0)
    {
      vec_safe_grow_cleared (fn->eh->ttype_data, len);
      for (i = 0; i < len; i++)
        {
          tree ttype = stream_read_tree (ib, data_in);
          (*fn->eh->ttype_data)[i] = ttype;
        }
    }

  /* Read the table of action chains.  */
  len = streamer_read_hwi (ib);
  gcc_assert (len == (int) len);
  if (len > 0)
    {
      if (targetm.arm_eabi_unwinder)
        {
          vec_safe_grow_cleared (fn->eh->ehspec_data.arm_eabi, len);
          for (i = 0; i < len; i++)
            {
              tree t = stream_read_tree (ib, data_in);
              (*fn->eh->ehspec_data.arm_eabi)[i] = t;
            }
        }
      else
        {
          vec_safe_grow_cleared (fn->eh->ehspec_data.other, len);
          for (i = 0; i < len; i++)
            {
              uchar c = streamer_read_uchar (ib);
              (*fn->eh->ehspec_data.other)[i] = c;
            }
        }
    }

  /* Reconstruct the EH region tree by fixing up the peer/children
     pointers.  */
  fixup_eh_region_pointers (fn, root_region);

  tag = streamer_read_record_start (ib);
  lto_tag_check_range (tag, LTO_null, LTO_null);
}

/* Make a new basic block with index INDEX in function FN.  */

static basic_block
make_new_block (struct function *fn, unsigned int index)
{
  basic_block bb = alloc_block ();
  bb->index = index;
  SET_BASIC_BLOCK_FOR_FN (fn, index, bb);
  n_basic_blocks_for_fn (fn)++;
  return bb;
}


/* Read a wide-int.  */

static widest_int
streamer_read_wi (struct lto_input_block *ib)
{
  HOST_WIDE_INT a[WIDE_INT_MAX_ELTS];
  int i;
  int prec ATTRIBUTE_UNUSED = streamer_read_uhwi (ib);
  int len = streamer_read_uhwi (ib);
  for (i = 0; i < len; i++)
    a[i] = streamer_read_hwi (ib);
  return widest_int::from_array (a, len);
}
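
/* On the wire a wide-int is encoded as its precision (currently unused on
   the reader side), the number of HOST_WIDE_INT elements, and then that many
   elements; streamer_read_wi simply reverses this encoding and rebuilds the
   value with widest_int::from_array.  */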

/* Read the CFG for function FN from input block IB.  */

static void
input_cfg (struct lto_input_block *ib, struct data_in *data_in,
           struct function *fn,
           int count_materialization_scale)
{
  unsigned int bb_count;
  basic_block p_bb;
  unsigned int i;
  int index;

  init_empty_tree_cfg_for_function (fn);
  init_ssa_operands (fn);

  profile_status_for_fn (fn) = streamer_read_enum (ib, profile_status_d,
                                                   PROFILE_LAST);

  bb_count = streamer_read_uhwi (ib);

  last_basic_block_for_fn (fn) = bb_count;
  if (bb_count > basic_block_info_for_fn (fn)->length ())
    vec_safe_grow_cleared (basic_block_info_for_fn (fn), bb_count);

  if (bb_count > label_to_block_map_for_fn (fn)->length ())
    vec_safe_grow_cleared (label_to_block_map_for_fn (fn), bb_count);

  index = streamer_read_hwi (ib);
  while (index != -1)
    {
      basic_block bb = BASIC_BLOCK_FOR_FN (fn, index);
      unsigned int edge_count;

      if (bb == NULL)
        bb = make_new_block (fn, index);

      edge_count = streamer_read_uhwi (ib);

      /* Connect up the CFG.  */
      for (i = 0; i < edge_count; i++)
        {
          unsigned int dest_index;
          unsigned int edge_flags;
          basic_block dest;
          int probability;
          gcov_type count;
          edge e;

          dest_index = streamer_read_uhwi (ib);
          probability = (int) streamer_read_hwi (ib);
          count = apply_scale ((gcov_type) streamer_read_gcov_count (ib),
                               count_materialization_scale);
          edge_flags = streamer_read_uhwi (ib);

          dest = BASIC_BLOCK_FOR_FN (fn, dest_index);

          if (dest == NULL)
            dest = make_new_block (fn, dest_index);

          e = make_edge (bb, dest, edge_flags);
          e->probability = probability;
          e->count = count;
        }

      index = streamer_read_hwi (ib);
    }

  p_bb = ENTRY_BLOCK_PTR_FOR_FN (fn);
  index = streamer_read_hwi (ib);
  while (index != -1)
    {
      basic_block bb = BASIC_BLOCK_FOR_FN (fn, index);
      bb->prev_bb = p_bb;
      p_bb->next_bb = bb;
      p_bb = bb;
      index = streamer_read_hwi (ib);
    }

  /* ???  The cfgloop interface is tied to cfun.  */
  gcc_assert (cfun == fn);

  /* Input the loop tree.  */
  unsigned n_loops = streamer_read_uhwi (ib);
  if (n_loops == 0)
    return;

  struct loops *loops = ggc_cleared_alloc<struct loops> ();
  init_loops_structure (fn, loops, n_loops);
  set_loops_for_fn (fn, loops);

  /* Input each loop and associate it with its loop header so
     flow_loops_find can rebuild the loop tree.  */
  for (unsigned i = 1; i < n_loops; ++i)
    {
      int header_index = streamer_read_hwi (ib);
      if (header_index == -1)
        {
          loops->larray->quick_push (NULL);
          continue;
        }

      struct loop *loop = alloc_loop ();
      loop->header = BASIC_BLOCK_FOR_FN (fn, header_index);
      loop->header->loop_father = loop;

      /* Read everything copy_loop_info copies.  */
      loop->estimate_state = streamer_read_enum (ib, loop_estimation, EST_LAST);
      loop->any_upper_bound = streamer_read_hwi (ib);
      if (loop->any_upper_bound)
        loop->nb_iterations_upper_bound = streamer_read_wi (ib);
      loop->any_estimate = streamer_read_hwi (ib);
      if (loop->any_estimate)
        loop->nb_iterations_estimate = streamer_read_wi (ib);

      /* Read OMP SIMD related info.  */
      loop->safelen = streamer_read_hwi (ib);
      loop->dont_vectorize = streamer_read_hwi (ib);
      loop->force_vectorize = streamer_read_hwi (ib);
      loop->simduid = stream_read_tree (ib, data_in);

      place_new_loop (fn, loop);

      /* flow_loops_find doesn't like loops not in the tree, hook them
         all as siblings of the tree root temporarily.  */
      flow_loop_tree_node_add (loops->tree_root, loop);
    }

  /* Rebuild the loop tree.  */
  flow_loops_find (loops);
}
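
/* Informal summary of the CFG section consumed above: the profile status,
   the number of basic blocks, then for each block its index, its outgoing
   edge count and, per edge, the destination index, probability, scaled
   profile count and flags; a block index of -1 terminates the list.  This is
   followed by the linear prev_bb/next_bb chain (again terminated by -1) and
   finally the loop tree.  */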

/* Read the SSA names array for function FN from DATA_IN using input
   block IB.  */

static void
input_ssa_names (struct lto_input_block *ib, struct data_in *data_in,
                 struct function *fn)
{
  unsigned int i, size;

  size = streamer_read_uhwi (ib);
  init_ssanames (fn, size);

  i = streamer_read_uhwi (ib);
  while (i)
    {
      tree ssa_name, name;
      bool is_default_def;

      /* Skip over the elements that had been freed.  */
      while (SSANAMES (fn)->length () < i)
        SSANAMES (fn)->quick_push (NULL_TREE);

      is_default_def = (streamer_read_uchar (ib) != 0);
      name = stream_read_tree (ib, data_in);
      ssa_name = make_ssa_name_fn (fn, name, gimple_build_nop ());

      if (is_default_def)
        set_ssa_default_def (cfun, SSA_NAME_VAR (ssa_name), ssa_name);

      i = streamer_read_uhwi (ib);
    }
}
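
/* Each streamed SSA name record consists of its 1-based index, a byte saying
   whether it is a default definition, and the underlying variable or
   identifier tree.  An index of 0 terminates the list; gaps left by SSA names
   that were freed on the writer side are padded with NULL_TREE so that the
   indices stay stable.  */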

/* Go through all NODE edges and fixup call_stmt pointers
   so they point to STMTS.  */

static void
fixup_call_stmt_edges_1 (struct cgraph_node *node, gimple *stmts,
                         struct function *fn)
{
  struct cgraph_edge *cedge;
  struct ipa_ref *ref = NULL;
  unsigned int i;

  for (cedge = node->callees; cedge; cedge = cedge->next_callee)
    {
      if (gimple_stmt_max_uid (fn) < cedge->lto_stmt_uid)
        fatal_error (input_location,
                     "Cgraph edge statement index out of range");
      cedge->call_stmt = as_a <gcall *> (stmts[cedge->lto_stmt_uid - 1]);
      if (!cedge->call_stmt)
        fatal_error (input_location,
                     "Cgraph edge statement index not found");
    }
  for (cedge = node->indirect_calls; cedge; cedge = cedge->next_callee)
    {
      if (gimple_stmt_max_uid (fn) < cedge->lto_stmt_uid)
        fatal_error (input_location,
                     "Cgraph edge statement index out of range");
      cedge->call_stmt = as_a <gcall *> (stmts[cedge->lto_stmt_uid - 1]);
      if (!cedge->call_stmt)
        fatal_error (input_location, "Cgraph edge statement index not found");
    }
  for (i = 0; node->iterate_reference (i, ref); i++)
    if (ref->lto_stmt_uid)
      {
        if (gimple_stmt_max_uid (fn) < ref->lto_stmt_uid)
          fatal_error (input_location,
                       "Reference statement index out of range");
        ref->stmt = stmts[ref->lto_stmt_uid - 1];
        if (!ref->stmt)
          fatal_error (input_location, "Reference statement index not found");
      }
}


/* Fixup call_stmt pointers in NODE and all clones.  */

static void
fixup_call_stmt_edges (struct cgraph_node *orig, gimple *stmts)
{
  struct cgraph_node *node;
  struct function *fn;

  while (orig->clone_of)
    orig = orig->clone_of;
  fn = DECL_STRUCT_FUNCTION (orig->decl);

  fixup_call_stmt_edges_1 (orig, stmts, fn);
  if (orig->clones)
    for (node = orig->clones; node != orig;)
      {
        fixup_call_stmt_edges_1 (node, stmts, fn);
        if (node->clones)
          node = node->clones;
        else if (node->next_sibling_clone)
          node = node->next_sibling_clone;
        else
          {
            while (node != orig && !node->next_sibling_clone)
              node = node->clone_of;
            if (node != orig)
              node = node->next_sibling_clone;
          }
      }
}
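
/* The lto_stmt_uid values recorded on cgraph edges and IPA references are
   1-based in the stream (0 means "no statement"), while the STMTS array built
   by input_function below is 0-based, hence the "- 1" when the pointers are
   restored above.  All clones share the statement array of the function body
   they were cloned from, which is why the walk starts from the ultimate
   clone_of ancestor.  */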

/* Input the base body of struct function FN from DATA_IN
   using input block IB.  */

static void
input_struct_function_base (struct function *fn, struct data_in *data_in,
                            struct lto_input_block *ib)
{
  struct bitpack_d bp;
  int len;

  /* Read the static chain and non-local goto save area.  */
  fn->static_chain_decl = stream_read_tree (ib, data_in);
  fn->nonlocal_goto_save_area = stream_read_tree (ib, data_in);

  /* Read all the local symbols.  */
  len = streamer_read_hwi (ib);
  if (len > 0)
    {
      int i;
      vec_safe_grow_cleared (fn->local_decls, len);
      for (i = 0; i < len; i++)
        {
          tree t = stream_read_tree (ib, data_in);
          (*fn->local_decls)[i] = t;
        }
    }

  /* Input the current IL state of the function.  */
  fn->curr_properties = streamer_read_uhwi (ib);

  /* Read all the attributes for FN.  */
  bp = streamer_read_bitpack (ib);
  fn->is_thunk = bp_unpack_value (&bp, 1);
  fn->has_local_explicit_reg_vars = bp_unpack_value (&bp, 1);
  fn->returns_pcc_struct = bp_unpack_value (&bp, 1);
  fn->returns_struct = bp_unpack_value (&bp, 1);
  fn->can_throw_non_call_exceptions = bp_unpack_value (&bp, 1);
  fn->can_delete_dead_exceptions = bp_unpack_value (&bp, 1);
  fn->always_inline_functions_inlined = bp_unpack_value (&bp, 1);
  fn->after_inlining = bp_unpack_value (&bp, 1);
  fn->stdarg = bp_unpack_value (&bp, 1);
  fn->has_nonlocal_label = bp_unpack_value (&bp, 1);
  fn->calls_alloca = bp_unpack_value (&bp, 1);
  fn->calls_setjmp = bp_unpack_value (&bp, 1);
  fn->has_force_vectorize_loops = bp_unpack_value (&bp, 1);
  fn->has_simduid_loops = bp_unpack_value (&bp, 1);
  fn->va_list_fpr_size = bp_unpack_value (&bp, 8);
  fn->va_list_gpr_size = bp_unpack_value (&bp, 8);
  fn->last_clique = bp_unpack_value (&bp, sizeof (short) * 8);

  /* Input the function start and end loci.  */
  fn->function_start_locus = stream_input_location_now (&bp, data_in);
  fn->function_end_locus = stream_input_location_now (&bp, data_in);
}
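
/* The bitpack above has to be unpacked in exactly the order the fields were
   packed by the corresponding output routine (output_struct_function_base in
   lto-streamer-out.c in this version); adding or reordering fields on one
   side without the other corrupts every field that follows.  */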

/* Read the body of function FN_DECL from DATA_IN using input block IB.  */

static void
input_function (tree fn_decl, struct data_in *data_in,
                struct lto_input_block *ib, struct lto_input_block *ib_cfg)
{
  struct function *fn;
  enum LTO_tags tag;
  gimple *stmts;
  basic_block bb;
  struct cgraph_node *node;

  tag = streamer_read_record_start (ib);
  lto_tag_check (tag, LTO_function);

  /* Read decls for parameters and args.  */
  DECL_RESULT (fn_decl) = stream_read_tree (ib, data_in);
  DECL_ARGUMENTS (fn_decl) = streamer_read_chain (ib, data_in);

  /* Read the tree of lexical scopes for the function.  */
  DECL_INITIAL (fn_decl) = stream_read_tree (ib, data_in);

  if (!streamer_read_uhwi (ib))
    return;

  push_struct_function (fn_decl);
  fn = DECL_STRUCT_FUNCTION (fn_decl);
  init_tree_ssa (fn);
  /* We input IL in SSA form.  */
  cfun->gimple_df->in_ssa_p = true;

  gimple_register_cfg_hooks ();

  node = cgraph_node::get (fn_decl);
  if (!node)
    node = cgraph_node::create (fn_decl);
  input_struct_function_base (fn, data_in, ib);
  input_cfg (ib_cfg, data_in, fn, node->count_materialization_scale);

  /* Read all the SSA names.  */
  input_ssa_names (ib, data_in, fn);

  /* Read the exception handling regions in the function.  */
  input_eh_regions (ib, data_in, fn);

  gcc_assert (DECL_INITIAL (fn_decl));
  DECL_SAVED_TREE (fn_decl) = NULL_TREE;

  /* Read all the basic blocks.  */
  tag = streamer_read_record_start (ib);
  while (tag)
    {
      input_bb (ib, tag, data_in, fn,
                node->count_materialization_scale);
      tag = streamer_read_record_start (ib);
    }

  /* Fix up the call statements that are mentioned in the callgraph
     edges.  */
  set_gimple_stmt_max_uid (cfun, 0);
  FOR_ALL_BB_FN (bb, cfun)
    {
      gimple_stmt_iterator gsi;
      for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
        {
          gimple stmt = gsi_stmt (gsi);
          gimple_set_uid (stmt, inc_gimple_stmt_max_uid (cfun));
        }
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
        {
          gimple stmt = gsi_stmt (gsi);
          gimple_set_uid (stmt, inc_gimple_stmt_max_uid (cfun));
        }
    }
  stmts = (gimple *) xcalloc (gimple_stmt_max_uid (fn), sizeof (gimple));
  FOR_ALL_BB_FN (bb, cfun)
    {
      gimple_stmt_iterator bsi = gsi_start_phis (bb);
      while (!gsi_end_p (bsi))
        {
          gimple stmt = gsi_stmt (bsi);
          gsi_next (&bsi);
          stmts[gimple_uid (stmt)] = stmt;
        }
      bsi = gsi_start_bb (bb);
      while (!gsi_end_p (bsi))
        {
          gimple stmt = gsi_stmt (bsi);
          /* If we're recompiling LTO objects with debug stmts but
             we're not supposed to have debug stmts, remove them now.
             We can't remove them earlier because this would cause uid
             mismatches in fixups, but we can do it at this point, as
             long as debug stmts don't require fixups.  */
          if (!MAY_HAVE_DEBUG_STMTS && !flag_wpa && is_gimple_debug (stmt))
            {
              gimple_stmt_iterator gsi = bsi;
              gsi_next (&bsi);
              gsi_remove (&gsi, true);
            }
          else
            {
              gsi_next (&bsi);
              stmts[gimple_uid (stmt)] = stmt;
            }
        }
    }

  /* Set the gimple body to the statement sequence in the entry
     basic block.  FIXME lto, this is fairly hacky.  The existence
     of a gimple body is used by the cgraph routines, but we should
     really use the presence of the CFG.  */
  {
    edge_iterator ei = ei_start (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs);
    gimple_set_body (fn_decl, bb_seq (ei_edge (ei)->dest));
  }

  fixup_call_stmt_edges (node, stmts);
  execute_all_ipa_stmt_fixups (node, stmts);

  update_ssa (TODO_update_ssa_only_virtuals);
  free_dominance_info (CDI_DOMINATORS);
  free_dominance_info (CDI_POST_DOMINATORS);
  free (stmts);
  pop_cfun ();
}

/* Read the initializer for variable VAR from DATA_IN using input block IB.  */

static void
input_constructor (tree var, struct data_in *data_in,
                   struct lto_input_block *ib)
{
  DECL_INITIAL (var) = stream_read_tree (ib, data_in);
}

/* Read the body from DATA for function NODE and fill it in.
   FILE_DATA are the global decls and types.  SECTION_TYPE is either
   LTO_section_function_body or LTO_section_static_initializer.  If
   section type is LTO_section_function_body, NODE must be the symtab
   node for that function.  */

static void
lto_read_body_or_constructor (struct lto_file_decl_data *file_data, struct symtab_node *node,
                              const char *data, enum lto_section_type section_type)
{
  const struct lto_function_header *header;
  struct data_in *data_in;
  int cfg_offset;
  int main_offset;
  int string_offset;
  tree fn_decl = node->decl;

  header = (const struct lto_function_header *) data;
  if (TREE_CODE (node->decl) == FUNCTION_DECL)
    {
      cfg_offset = sizeof (struct lto_function_header);
      main_offset = cfg_offset + header->cfg_size;
      string_offset = main_offset + header->main_size;
    }
  else
    {
      main_offset = sizeof (struct lto_function_header);
      string_offset = main_offset + header->main_size;
    }

  data_in = lto_data_in_create (file_data, data + string_offset,
                                header->string_size, vNULL);

  if (section_type == LTO_section_function_body)
    {
      struct lto_in_decl_state *decl_state;
      unsigned from;

      gcc_checking_assert (node);

      /* Use the function's decl state.  */
      decl_state = lto_get_function_in_decl_state (file_data, fn_decl);
      gcc_assert (decl_state);
      file_data->current_decl_state = decl_state;

      /* Set up the struct function.  */
      from = data_in->reader_cache->nodes.length ();
      lto_input_block ib_main (data + main_offset, header->main_size,
                               file_data->mode_table);
      if (TREE_CODE (node->decl) == FUNCTION_DECL)
        {
          lto_input_block ib_cfg (data + cfg_offset, header->cfg_size,
                                  file_data->mode_table);
          input_function (fn_decl, data_in, &ib_main, &ib_cfg);
        }
      else
        input_constructor (fn_decl, data_in, &ib_main);
      data_in->location_cache.apply_location_cache ();
      /* And fixup types we streamed locally.  */
      {
        struct streamer_tree_cache_d *cache = data_in->reader_cache;
        unsigned len = cache->nodes.length ();
        unsigned i;
        for (i = len; i-- > from;)
          {
            tree t = streamer_tree_cache_get_tree (cache, i);
            if (t == NULL_TREE)
              continue;

            if (TYPE_P (t))
              {
                gcc_assert (TYPE_CANONICAL (t) == NULL_TREE);
                TYPE_CANONICAL (t) = TYPE_MAIN_VARIANT (t);
                if (TYPE_MAIN_VARIANT (t) != t)
                  {
                    gcc_assert (TYPE_NEXT_VARIANT (t) == NULL_TREE);
                    TYPE_NEXT_VARIANT (t)
                      = TYPE_NEXT_VARIANT (TYPE_MAIN_VARIANT (t));
                    TYPE_NEXT_VARIANT (TYPE_MAIN_VARIANT (t)) = t;
                  }
              }
          }
      }

      /* Restore decl state */
      file_data->current_decl_state = file_data->global_decl_state;
    }

  lto_data_in_delete (data_in);
}

/* Read the body of NODE using DATA.  FILE_DATA holds the global
   decls and types.  */

void
lto_input_function_body (struct lto_file_decl_data *file_data,
                         struct cgraph_node *node, const char *data)
{
  lto_read_body_or_constructor (file_data, node, data, LTO_section_function_body);
}

/* Read the variable constructor of NODE using DATA.  FILE_DATA holds
   the global decls and types.  */

void
lto_input_variable_constructor (struct lto_file_decl_data *file_data,
                                struct varpool_node *node, const char *data)
{
  lto_read_body_or_constructor (file_data, node, data, LTO_section_function_body);
}


/* Read the physical representation of a tree node EXPR from
   input block IB using the per-file context in DATA_IN.  */

static void
lto_read_tree_1 (struct lto_input_block *ib, struct data_in *data_in, tree expr)
{
  /* Read all the bitfield values in EXPR.  Note that for LTO, we
     only write language-independent bitfields, so no more unpacking is
     needed.  */
  streamer_read_tree_bitfields (ib, data_in, expr);

  /* Read all the pointer fields in EXPR.  */
  streamer_read_tree_body (ib, data_in, expr);

  /* Read any LTO-specific data not read by the tree streamer.  */
  if (DECL_P (expr)
      && TREE_CODE (expr) != FUNCTION_DECL
      && TREE_CODE (expr) != TRANSLATION_UNIT_DECL)
    DECL_INITIAL (expr) = stream_read_tree (ib, data_in);

  /* We should never try to instantiate an MD or NORMAL builtin here.  */
  if (TREE_CODE (expr) == FUNCTION_DECL)
    gcc_assert (!streamer_handle_as_builtin_p (expr));

#ifdef LTO_STREAMER_DEBUG
  /* Remove the mapping to RESULT's original address set by
     streamer_alloc_tree.  */
  lto_orig_address_remove (expr);
#endif
}

/* Read the physical representation of a tree node with tag TAG from
   input block IB using the per-file context in DATA_IN.  */

static tree
lto_read_tree (struct lto_input_block *ib, struct data_in *data_in,
               enum LTO_tags tag, hashval_t hash)
{
  /* Instantiate a new tree node.  */
  tree result = streamer_alloc_tree (ib, data_in, tag);

  /* Enter RESULT in the reader cache.  This will make RESULT
     available so that circular references in the rest of the tree
     structure can be resolved in subsequent calls to stream_read_tree.  */
  streamer_tree_cache_append (data_in->reader_cache, result, hash);

  lto_read_tree_1 (ib, data_in, result);

  /* end_marker = */ streamer_read_uchar (ib);

  return result;
}

/* Populate the reader cache with trees materialized from the SCC
   following in the IB, DATA_IN stream.  */

hashval_t
lto_input_scc (struct lto_input_block *ib, struct data_in *data_in,
               unsigned *len, unsigned *entry_len)
{
  /* A blob of unnamed tree nodes, fill the cache from it and
     recurse.  */
  unsigned size = streamer_read_uhwi (ib);
  hashval_t scc_hash = streamer_read_uhwi (ib);
  unsigned scc_entry_len = 1;

  if (size == 1)
    {
      enum LTO_tags tag = streamer_read_record_start (ib);
      lto_input_tree_1 (ib, data_in, tag, scc_hash);
    }
  else
    {
      unsigned int first = data_in->reader_cache->nodes.length ();
      tree result;

      scc_entry_len = streamer_read_uhwi (ib);

      /* Materialize size trees by reading their headers.  */
      for (unsigned i = 0; i < size; ++i)
        {
          enum LTO_tags tag = streamer_read_record_start (ib);
          if (tag == LTO_null
              || (tag >= LTO_field_decl_ref && tag <= LTO_global_decl_ref)
              || tag == LTO_tree_pickle_reference
              || tag == LTO_builtin_decl
              || tag == LTO_integer_cst
              || tag == LTO_tree_scc)
            gcc_unreachable ();

          result = streamer_alloc_tree (ib, data_in, tag);
          streamer_tree_cache_append (data_in->reader_cache, result, 0);
        }

      /* Read the tree bitpacks and references.  */
      for (unsigned i = 0; i < size; ++i)
        {
          result = streamer_tree_cache_get_tree (data_in->reader_cache,
                                                 first + i);
          lto_read_tree_1 (ib, data_in, result);
          /* end_marker = */ streamer_read_uchar (ib);
        }
    }

  *len = size;
  *entry_len = scc_entry_len;
  return scc_hash;
}
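
/* An SCC record therefore starts with its size and hash.  A singleton SCC is
   read as an ordinary tree via lto_input_tree_1; a larger SCC is read in two
   passes so that references within the SCC resolve: first all node headers
   are allocated and appended to the reader cache, then each node's bitfields
   and pointer fields are filled in by lto_read_tree_1.  The returned hash and
   entry length are what the tree-merging code in lto/lto.c uses (in this
   version) to unify identical SCCs coming from different translation
   units.  */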

/* Read a tree from input block IB using the per-file context in
   DATA_IN.  This context is used, for example, to resolve references
   to previously read nodes.  */

tree
lto_input_tree_1 (struct lto_input_block *ib, struct data_in *data_in,
                  enum LTO_tags tag, hashval_t hash)
{
  tree result;

  gcc_assert ((unsigned) tag < (unsigned) LTO_NUM_TAGS);

  if (tag == LTO_null)
    result = NULL_TREE;
  else if (tag >= LTO_field_decl_ref && tag <= LTO_namelist_decl_ref)
    {
      /* If TAG is a reference to an indexable tree, the next value
         in IB is the index into the table where we expect to find
         that tree.  */
      result = lto_input_tree_ref (ib, data_in, cfun, tag);
    }
  else if (tag == LTO_tree_pickle_reference)
    {
      /* If TAG is a reference to a previously read tree, look it up in
         the reader cache.  */
      result = streamer_get_pickled_tree (ib, data_in);
    }
  else if (tag == LTO_builtin_decl)
    {
      /* If we are going to read a built-in function, all we need is
         the code and class.  */
      result = streamer_get_builtin_tree (ib, data_in);
    }
  else if (tag == LTO_integer_cst)
    {
      /* For shared integer constants in singletons we can use the
         existing tree integer constant merging code.  */
      tree type = stream_read_tree (ib, data_in);
      unsigned HOST_WIDE_INT len = streamer_read_uhwi (ib);
      unsigned HOST_WIDE_INT i;
      HOST_WIDE_INT a[WIDE_INT_MAX_ELTS];

      for (i = 0; i < len; i++)
        a[i] = streamer_read_hwi (ib);
      gcc_assert (TYPE_PRECISION (type) <= MAX_BITSIZE_MODE_ANY_INT);
      result = wide_int_to_tree (type, wide_int::from_array
                                 (a, len, TYPE_PRECISION (type)));
      streamer_tree_cache_append (data_in->reader_cache, result, hash);
    }
  else if (tag == LTO_tree_scc)
    gcc_unreachable ();
  else
    {
      /* Otherwise, materialize a new node from IB.  */
      result = lto_read_tree (ib, data_in, tag, hash);
    }

  return result;
}

tree
lto_input_tree (struct lto_input_block *ib, struct data_in *data_in)
{
  enum LTO_tags tag;

  /* Input and skip SCCs.  */
  while ((tag = streamer_read_record_start (ib)) == LTO_tree_scc)
    {
      unsigned len, entry_len;
      lto_input_scc (ib, data_in, &len, &entry_len);
    }
  return lto_input_tree_1 (ib, data_in, tag, 0);
}

/* Input toplevel asms.  */

void
lto_input_toplevel_asms (struct lto_file_decl_data *file_data, int order_base)
{
  size_t len;
  const char *data = lto_get_section_data (file_data, LTO_section_asm,
                                           NULL, &len);
  const struct lto_simple_header_with_strings *header
    = (const struct lto_simple_header_with_strings *) data;
  int string_offset;
  struct data_in *data_in;
  tree str;

  if (! data)
    return;

  string_offset = sizeof (*header) + header->main_size;

  lto_input_block ib (data + sizeof (*header), header->main_size,
                      file_data->mode_table);

  data_in = lto_data_in_create (file_data, data + string_offset,
                                header->string_size, vNULL);

  while ((str = streamer_read_string_cst (data_in, &ib)))
    {
      asm_node *node = symtab->finalize_toplevel_asm (str);
      node->order = streamer_read_hwi (&ib) + order_base;
      if (node->order >= symtab->order)
        symtab->order = node->order + 1;
    }

  lto_data_in_delete (data_in);

  lto_free_section_data (file_data, LTO_section_asm, NULL, data, len);
}

/* Input mode table.  */

void
lto_input_mode_table (struct lto_file_decl_data *file_data)
{
  size_t len;
  const char *data = lto_get_section_data (file_data, LTO_section_mode_table,
                                           NULL, &len);
  if (! data)
    {
      internal_error ("cannot read LTO mode table from %s",
                      file_data->file_name);
      return;
    }

  unsigned char *table = ggc_cleared_vec_alloc<unsigned char> (1 << 8);
  file_data->mode_table = table;
  const struct lto_simple_header_with_strings *header
    = (const struct lto_simple_header_with_strings *) data;
  int string_offset;
  struct data_in *data_in;
  string_offset = sizeof (*header) + header->main_size;
  lto_input_block ib (data + sizeof (*header), header->main_size, NULL);
  data_in = lto_data_in_create (file_data, data + string_offset,
                                header->string_size, vNULL);
  bitpack_d bp = streamer_read_bitpack (&ib);

  table[VOIDmode] = VOIDmode;
  table[BLKmode] = BLKmode;
  unsigned int m;
  while ((m = bp_unpack_value (&bp, 8)) != VOIDmode)
    {
      enum mode_class mclass
        = bp_unpack_enum (&bp, mode_class, MAX_MODE_CLASS);
      unsigned int size = bp_unpack_value (&bp, 8);
      unsigned int prec = bp_unpack_value (&bp, 16);
      machine_mode inner = (machine_mode) table[bp_unpack_value (&bp, 8)];
      unsigned int nunits = bp_unpack_value (&bp, 8);
      unsigned int ibit = 0, fbit = 0;
      unsigned int real_fmt_len = 0;
      const char *real_fmt_name = NULL;
      switch (mclass)
        {
        case MODE_FRACT:
        case MODE_UFRACT:
        case MODE_ACCUM:
        case MODE_UACCUM:
          ibit = bp_unpack_value (&bp, 8);
          fbit = bp_unpack_value (&bp, 8);
          break;
        case MODE_FLOAT:
        case MODE_DECIMAL_FLOAT:
          real_fmt_name = bp_unpack_indexed_string (data_in, &bp,
                                                    &real_fmt_len);
          break;
        default:
          break;
        }
      /* First search just the GET_CLASS_NARROWEST_MODE to wider modes,
         if not found, fallback to all modes.  */
      int pass;
      for (pass = 0; pass < 2; pass++)
        for (machine_mode mr = pass ? VOIDmode
                                    : GET_CLASS_NARROWEST_MODE (mclass);
             pass ? mr < MAX_MACHINE_MODE : mr != VOIDmode;
             pass ? mr = (machine_mode) (mr + 1)
                  : mr = GET_MODE_WIDER_MODE (mr))
          if (GET_MODE_CLASS (mr) != mclass
              || GET_MODE_SIZE (mr) != size
              || GET_MODE_PRECISION (mr) != prec
              || GET_MODE_INNER (mr) != inner
              || GET_MODE_IBIT (mr) != ibit
              || GET_MODE_FBIT (mr) != fbit
              || GET_MODE_NUNITS (mr) != nunits)
            continue;
          else if ((mclass == MODE_FLOAT || mclass == MODE_DECIMAL_FLOAT)
                   && strcmp (REAL_MODE_FORMAT (mr)->name, real_fmt_name) != 0)
            continue;
          else
            {
              table[m] = mr;
              pass = 2;
              break;
            }
      unsigned int mname_len;
      const char *mname = bp_unpack_indexed_string (data_in, &bp, &mname_len);
      if (pass == 2)
        {
          switch (mclass)
            {
            case MODE_VECTOR_INT:
            case MODE_VECTOR_FLOAT:
            case MODE_VECTOR_FRACT:
            case MODE_VECTOR_UFRACT:
            case MODE_VECTOR_ACCUM:
            case MODE_VECTOR_UACCUM:
              /* For unsupported vector modes just use BLKmode,
                 if the scalar mode is supported.  */
              if (inner != VOIDmode)
                {
                  table[m] = BLKmode;
                  break;
                }
              /* FALLTHRU */
            default:
              fatal_error (UNKNOWN_LOCATION, "unsupported mode %s\n", mname);
              break;
            }
        }
    }
  lto_data_in_delete (data_in);
  lto_free_section_data (file_data, LTO_section_mode_table, NULL, data, len);
}
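
/* The mode table exists because machine_mode numbering is not stable across
   different compilers: when LTO bytecode written by one compiler is read by
   another (for instance an offloading target compiler), the mode numbers in
   the stream must be remapped.  The loop above rebuilds that mapping by
   searching for a local mode with the same class, size, precision, inner
   mode, ibit/fbit, nunits and (for float modes) real format name as the
   streamed description, falling back to BLKmode for unsupported vector
   modes whose scalar mode is supported.  */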

/* Initialization for the LTO reader.  */

void
lto_reader_init (void)
{
  lto_streamer_init ();
  file_name_hash_table
    = new hash_table<freeing_string_slot_hasher> (37);
}


/* Create a new data_in object for FILE_DATA.  STRINGS is the string
   table to use with LEN strings.  RESOLUTIONS is the vector of linker
   resolutions (NULL if not using a linker plugin).  */

struct data_in *
lto_data_in_create (struct lto_file_decl_data *file_data, const char *strings,
                    unsigned len,
                    vec<ld_plugin_symbol_resolution_t> resolutions)
{
  struct data_in *data_in = new (struct data_in);
  data_in->file_data = file_data;
  data_in->strings = strings;
  data_in->strings_len = len;
  data_in->globals_resolution = resolutions;
  data_in->reader_cache = streamer_tree_cache_create (false, false, true);
  return data_in;
}
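
/* lto_data_in_create and lto_data_in_delete are used as a pair around each
   section that gets read; see lto_read_body_or_constructor and
   lto_input_toplevel_asms above for typical usage.  */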

/* Remove DATA_IN.  */

void
lto_data_in_delete (struct data_in *data_in)
{
  data_in->globals_resolution.release ();
  streamer_tree_cache_delete (data_in->reader_cache);
  delete data_in;
}