- /* Loop invariant motion.
- Copyright (C) 2003-2015 Free Software Foundation, Inc.
- This file is part of GCC.
- GCC is free software; you can redistribute it and/or modify it
- under the terms of the GNU General Public License as published by the
- Free Software Foundation; either version 3, or (at your option) any
- later version.
- GCC is distributed in the hope that it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- for more details.
- You should have received a copy of the GNU General Public License
- along with GCC; see the file COPYING3. If not see
- <http://www.gnu.org/licenses/>. */
- #include "config.h"
- #include "system.h"
- #include "coretypes.h"
- #include "tm.h"
- #include "hash-set.h"
- #include "machmode.h"
- #include "vec.h"
- #include "double-int.h"
- #include "input.h"
- #include "alias.h"
- #include "symtab.h"
- #include "wide-int.h"
- #include "inchash.h"
- #include "tree.h"
- #include "fold-const.h"
- #include "tm_p.h"
- #include "predict.h"
- #include "hard-reg-set.h"
- #include "input.h"
- #include "function.h"
- #include "dominance.h"
- #include "cfg.h"
- #include "cfganal.h"
- #include "basic-block.h"
- #include "gimple-pretty-print.h"
- #include "hash-map.h"
- #include "hash-table.h"
- #include "tree-ssa-alias.h"
- #include "internal-fn.h"
- #include "tree-eh.h"
- #include "gimple-expr.h"
- #include "is-a.h"
- #include "gimple.h"
- #include "gimplify.h"
- #include "gimple-iterator.h"
- #include "gimple-ssa.h"
- #include "tree-cfg.h"
- #include "tree-phinodes.h"
- #include "ssa-iterators.h"
- #include "stringpool.h"
- #include "tree-ssanames.h"
- #include "tree-ssa-loop-manip.h"
- #include "tree-ssa-loop.h"
- #include "tree-into-ssa.h"
- #include "cfgloop.h"
- #include "domwalk.h"
- #include "params.h"
- #include "tree-pass.h"
- #include "flags.h"
- #include "tree-affine.h"
- #include "tree-ssa-propagate.h"
- #include "trans-mem.h"
- #include "gimple-fold.h"
- /* TODO: Support for predicated code motion. I.e.
- while (1)
- {
- if (cond)
- {
- a = inv;
- something;
- }
- }
- Where COND and INV are invariants, but evaluating INV may trap or be
- invalid for some other reason if !COND. This may be transformed to
- if (cond)
- a = inv;
- while (1)
- {
- if (cond)
- something;
- } */
- /* The auxiliary data kept for each statement. */
- struct lim_aux_data
- {
- struct loop *max_loop; /* The outermost loop in which the statement
- is invariant. */
- struct loop *tgt_loop; /* The loop out of which we want to move the
- invariant. */
- struct loop *always_executed_in;
- /* The outermost loop for which we are sure
- the statement is executed if the loop
- is entered. */
- unsigned cost; /* Cost of the computation performed by the
- statement. */
- vec<gimple> depends; /* Vector of statements that must also be
- hoisted out of the loop when this statement
- is hoisted; i.e. those that define the
- operands of the statement and are inside of
- the MAX_LOOP loop. */
- };
- /* Maps statements to their lim_aux_data. */
- static hash_map<gimple, lim_aux_data *> *lim_aux_data_map;
- /* Description of a memory reference location. */
- typedef struct mem_ref_loc
- {
- tree *ref; /* The reference itself. */
- gimple stmt; /* The statement in which it occurs. */
- } *mem_ref_loc_p;
- /* Description of a memory reference. */
- typedef struct im_mem_ref
- {
- unsigned id; /* ID assigned to the memory reference
- (its index in memory_accesses.refs_list) */
- hashval_t hash; /* Its hash value. */
- /* The memory access itself and associated caching of alias-oracle
- query meta-data. */
- ao_ref mem;
- bitmap stored; /* The set of loops in which this memory location
- is stored to. */
- vec<mem_ref_loc> accesses_in_loop;
- /* The locations of the accesses. Vector
- sorted by the postorder number of the
- access's loop father. */
- /* The following sets are computed on demand. We keep both set and
- its complement, so that we know whether the information was
- already computed or not. */
- bitmap_head indep_loop; /* The set of loops in which the memory
- reference is independent, meaning:
- If it is stored to in the loop, this store
- is independent of all other loads and
- stores.
- If it is only loaded, then it is independent
- of all stores in the loop. */
- bitmap_head dep_loop; /* The complement of INDEP_LOOP. */
- } *mem_ref_p;
- /* We use two bits per loop in the ref->{in,}dep_loop bitmaps, the first
- to record (in)dependence against stores in the loop and its subloops, the
- second to record (in)dependence against all references in the loop
- and its subloops. */
- #define LOOP_DEP_BIT(loopnum, storedp) (2 * (loopnum) + (storedp ? 1 : 0))
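- /* For illustration, loop number 3 owns bits 6 and 7:
- LOOP_DEP_BIT (3, false) == 6 and LOOP_DEP_BIT (3, true) == 7. */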
- /* Mem_ref hashtable helpers. */
- struct mem_ref_hasher : typed_noop_remove <im_mem_ref>
- {
- typedef im_mem_ref value_type;
- typedef tree_node compare_type;
- static inline hashval_t hash (const value_type *);
- static inline bool equal (const value_type *, const compare_type *);
- };
- /* A hash function for struct im_mem_ref object OBJ. */
- inline hashval_t
- mem_ref_hasher::hash (const value_type *mem)
- {
- return mem->hash;
- }
- /* An equality function for struct im_mem_ref object MEM1 with
- memory reference OBJ2. */
- inline bool
- mem_ref_hasher::equal (const value_type *mem1, const compare_type *obj2)
- {
- return operand_equal_p (mem1->mem.ref, (const_tree) obj2, 0);
- }
- /* Description of memory accesses in loops. */
- static struct
- {
- /* The hash table of memory references accessed in loops. */
- hash_table<mem_ref_hasher> *refs;
- /* The list of memory references. */
- vec<mem_ref_p> refs_list;
- /* The set of memory references accessed in each loop. */
- vec<bitmap_head> refs_in_loop;
- /* The set of memory references stored in each loop. */
- vec<bitmap_head> refs_stored_in_loop;
- /* The set of memory references stored in each loop, including subloops. */
- vec<bitmap_head> all_refs_stored_in_loop;
- /* Cache for expanding memory addresses. */
- hash_map<tree, name_expansion *> *ttae_cache;
- } memory_accesses;
- /* Obstack for the bitmaps in the above data structures. */
- static bitmap_obstack lim_bitmap_obstack;
- static obstack mem_ref_obstack;
- static bool ref_indep_loop_p (struct loop *, mem_ref_p);
- /* Minimum cost of an expensive expression. */
- #define LIM_EXPENSIVE ((unsigned) PARAM_VALUE (PARAM_LIM_EXPENSIVE))
- /* The outermost loop for which execution of the header guarantees that the
- block will be executed. */
- #define ALWAYS_EXECUTED_IN(BB) ((struct loop *) (BB)->aux)
- #define SET_ALWAYS_EXECUTED_IN(BB, VAL) ((BB)->aux = (void *) (VAL))
- /* ID of the shared unanalyzable mem. */
- #define UNANALYZABLE_MEM_ID 0
- /* Whether the reference was analyzable. */
- #define MEM_ANALYZABLE(REF) ((REF)->id != UNANALYZABLE_MEM_ID)
- static struct lim_aux_data *
- init_lim_data (gimple stmt)
- {
- lim_aux_data *p = XCNEW (struct lim_aux_data);
- lim_aux_data_map->put (stmt, p);
- return p;
- }
- static struct lim_aux_data *
- get_lim_data (gimple stmt)
- {
- lim_aux_data **p = lim_aux_data_map->get (stmt);
- if (!p)
- return NULL;
- return *p;
- }
- /* Releases the memory occupied by DATA. */
- static void
- free_lim_aux_data (struct lim_aux_data *data)
- {
- data->depends.release ();
- free (data);
- }
- static void
- clear_lim_data (gimple stmt)
- {
- lim_aux_data **p = lim_aux_data_map->get (stmt);
- if (!p)
- return;
- free_lim_aux_data (*p);
- *p = NULL;
- }
- /* The possibilities of statement movement. */
- enum move_pos
- {
- MOVE_IMPOSSIBLE, /* No movement -- side effect expression. */
- MOVE_PRESERVE_EXECUTION, /* Must not cause a non-executed statement
- to become executed -- memory accesses, ... */
- MOVE_POSSIBLE /* Unlimited movement. */
- };
- /* If it is possible to hoist the statement STMT unconditionally,
- returns MOVE_POSSIBLE.
- If it is possible to hoist the statement STMT, but we must avoid making
- it executed if it would not be executed in the original program (e.g.
- because it may trap), return MOVE_PRESERVE_EXECUTION.
- Otherwise return MOVE_IMPOSSIBLE. */
- enum move_pos
- movement_possibility (gimple stmt)
- {
- tree lhs;
- enum move_pos ret = MOVE_POSSIBLE;
- if (flag_unswitch_loops
- && gimple_code (stmt) == GIMPLE_COND)
- {
- /* If we perform unswitching, force the operands of the invariant
- condition to be moved out of the loop. */
- return MOVE_POSSIBLE;
- }
- if (gimple_code (stmt) == GIMPLE_PHI
- && gimple_phi_num_args (stmt) <= 2
- && !virtual_operand_p (gimple_phi_result (stmt))
- && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_phi_result (stmt)))
- return MOVE_POSSIBLE;
- if (gimple_get_lhs (stmt) == NULL_TREE)
- return MOVE_IMPOSSIBLE;
- if (gimple_vdef (stmt))
- return MOVE_IMPOSSIBLE;
- if (stmt_ends_bb_p (stmt)
- || gimple_has_volatile_ops (stmt)
- || gimple_has_side_effects (stmt)
- || stmt_could_throw_p (stmt))
- return MOVE_IMPOSSIBLE;
- if (is_gimple_call (stmt))
- {
- /* While a pure or const call is guaranteed to have no side effects, we
- cannot move it arbitrarily. Consider code like
- char *s = something ();
- while (1)
- {
- if (s)
- t = strlen (s);
- else
- t = 0;
- }
- Here the strlen call cannot be moved out of the loop, even though
- s is invariant. In addition to possibly creating a call with
- invalid arguments, moving out a function call that is not executed
- may cause performance regressions in case the call is costly and
- not executed at all. */
- ret = MOVE_PRESERVE_EXECUTION;
- lhs = gimple_call_lhs (stmt);
- }
- else if (is_gimple_assign (stmt))
- lhs = gimple_assign_lhs (stmt);
- else
- return MOVE_IMPOSSIBLE;
- if (TREE_CODE (lhs) == SSA_NAME
- && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
- return MOVE_IMPOSSIBLE;
- if (TREE_CODE (lhs) != SSA_NAME
- || gimple_could_trap_p (stmt))
- return MOVE_PRESERVE_EXECUTION;
- /* Non-local loads in a transaction cannot be hoisted out. Well,
- unless the load happens on every path out of the loop, but we
- don't take this into account yet. */
- if (flag_tm
- && gimple_in_transaction (stmt)
- && gimple_assign_single_p (stmt))
- {
- tree rhs = gimple_assign_rhs1 (stmt);
- if (DECL_P (rhs) && is_global_var (rhs))
- {
- if (dump_file)
- {
- fprintf (dump_file, "Cannot hoist conditional load of ");
- print_generic_expr (dump_file, rhs, TDF_SLIM);
- fprintf (dump_file, " because it is in a transaction.\n");
- }
- return MOVE_IMPOSSIBLE;
- }
- }
- return ret;
- }
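- /* For illustration, a sketch of how typical statements classify:
- while (1)
- {
- y = a + b; <-- no side effects, SSA lhs: MOVE_POSSIBLE
- if (n != 0)
- x = 100 / n; <-- division may trap: MOVE_PRESERVE_EXECUTION
- *p = x; <-- creates a virtual definition: MOVE_IMPOSSIBLE
- } */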
- /* Suppose that operand DEF is used inside LOOP. Returns the outermost
- loop to which we could move the expression using DEF if it did not have
- other operands, i.e. the outermost loop enclosing LOOP in which the value
- of DEF is invariant. */
- static struct loop *
- outermost_invariant_loop (tree def, struct loop *loop)
- {
- gimple def_stmt;
- basic_block def_bb;
- struct loop *max_loop;
- struct lim_aux_data *lim_data;
- if (!def)
- return superloop_at_depth (loop, 1);
- if (TREE_CODE (def) != SSA_NAME)
- {
- gcc_assert (is_gimple_min_invariant (def));
- return superloop_at_depth (loop, 1);
- }
- def_stmt = SSA_NAME_DEF_STMT (def);
- def_bb = gimple_bb (def_stmt);
- if (!def_bb)
- return superloop_at_depth (loop, 1);
- max_loop = find_common_loop (loop, def_bb->loop_father);
- lim_data = get_lim_data (def_stmt);
- if (lim_data != NULL && lim_data->max_loop != NULL)
- max_loop = find_common_loop (max_loop,
- loop_outer (lim_data->max_loop));
- if (max_loop == loop)
- return NULL;
- max_loop = superloop_at_depth (loop, loop_depth (max_loop) + 1);
- return max_loop;
- }
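- /* For illustration (sketch): given a nest
- loop A { loop B { use of DEF; } }
- with DEF defined before loop A, outermost_invariant_loop (DEF, B)
- returns A; if DEF were defined inside B itself, it would return NULL. */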
- /* DATA is a structure containing information associated with a statement
- inside LOOP. DEF is one of the operands of this statement.
- Find the outermost loop enclosing LOOP in which the value of DEF is
- invariant and record this in the DATA->max_loop field. If DEF itself is
- defined inside this loop as well (i.e. we need to hoist it out of the loop
- if we want to hoist the statement represented by DATA), record the
- statement that defines DEF in the DATA->depends list. Additionally, if
- ADD_COST is true, add the cost of the computation of DEF to DATA->cost.
- If DEF is not invariant in LOOP, return false. Otherwise return true. */
- static bool
- add_dependency (tree def, struct lim_aux_data *data, struct loop *loop,
- bool add_cost)
- {
- gimple def_stmt = SSA_NAME_DEF_STMT (def);
- basic_block def_bb = gimple_bb (def_stmt);
- struct loop *max_loop;
- struct lim_aux_data *def_data;
- if (!def_bb)
- return true;
- max_loop = outermost_invariant_loop (def, loop);
- if (!max_loop)
- return false;
- if (flow_loop_nested_p (data->max_loop, max_loop))
- data->max_loop = max_loop;
- def_data = get_lim_data (def_stmt);
- if (!def_data)
- return true;
- if (add_cost
- /* Only add the cost if the statement defining DEF is inside LOOP,
- i.e. if it is likely that by moving the invariants dependent
- on it, we will be able to avoid creating a new register for
- it (since it will only be used in these dependent invariants). */
- && def_bb->loop_father == loop)
- data->cost += def_data->cost;
- data->depends.safe_push (def_stmt);
- return true;
- }
- /* Returns an estimate for a cost of statement STMT. The values here
- are just ad-hoc constants, similar to costs for inlining. */
- static unsigned
- stmt_cost (gimple stmt)
- {
- /* Always try to create possibilities for unswitching. */
- if (gimple_code (stmt) == GIMPLE_COND
- || gimple_code (stmt) == GIMPLE_PHI)
- return LIM_EXPENSIVE;
- /* We should be hoisting calls if possible. */
- if (is_gimple_call (stmt))
- {
- tree fndecl;
- /* Unless the call is to builtin_constant_p; this always folds to a
- constant, so moving it is useless. */
- fndecl = gimple_call_fndecl (stmt);
- if (fndecl
- && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
- && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CONSTANT_P)
- return 0;
- return LIM_EXPENSIVE;
- }
- /* Hoisting memory references out should almost surely be a win. */
- if (gimple_references_memory_p (stmt))
- return LIM_EXPENSIVE;
- if (gimple_code (stmt) != GIMPLE_ASSIGN)
- return 1;
- switch (gimple_assign_rhs_code (stmt))
- {
- case MULT_EXPR:
- case WIDEN_MULT_EXPR:
- case WIDEN_MULT_PLUS_EXPR:
- case WIDEN_MULT_MINUS_EXPR:
- case DOT_PROD_EXPR:
- case FMA_EXPR:
- case TRUNC_DIV_EXPR:
- case CEIL_DIV_EXPR:
- case FLOOR_DIV_EXPR:
- case ROUND_DIV_EXPR:
- case EXACT_DIV_EXPR:
- case CEIL_MOD_EXPR:
- case FLOOR_MOD_EXPR:
- case ROUND_MOD_EXPR:
- case TRUNC_MOD_EXPR:
- case RDIV_EXPR:
- /* Division and multiplication are usually expensive. */
- return LIM_EXPENSIVE;
- case LSHIFT_EXPR:
- case RSHIFT_EXPR:
- case WIDEN_LSHIFT_EXPR:
- case LROTATE_EXPR:
- case RROTATE_EXPR:
- /* Shifts and rotates are usually expensive. */
- return LIM_EXPENSIVE;
- case CONSTRUCTOR:
- /* Make vector construction cost proportional to the number
- of elements. */
- return CONSTRUCTOR_NELTS (gimple_assign_rhs1 (stmt));
- case SSA_NAME:
- case PAREN_EXPR:
- /* Whether or not something is wrapped inside a PAREN_EXPR
- should not change move cost. Nor should an intermediate
- unpropagated SSA name copy. */
- return 0;
- default:
- return 1;
- }
- }
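- /* For illustration: under these rules a division or multiplication costs
- LIM_EXPENSIVE, a four-element vector CONSTRUCTOR costs 4, a plain SSA copy
- or PAREN_EXPR costs 0, and any other assignment costs 1. */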
- /* Finds the outermost loop between OUTER and LOOP in which the memory
- reference REF is independent. If REF is not independent in LOOP, NULL
- is returned instead. */
- static struct loop *
- outermost_indep_loop (struct loop *outer, struct loop *loop, mem_ref_p ref)
- {
- struct loop *aloop;
- if (ref->stored && bitmap_bit_p (ref->stored, loop->num))
- return NULL;
- for (aloop = outer;
- aloop != loop;
- aloop = superloop_at_depth (loop, loop_depth (aloop) + 1))
- if ((!ref->stored || !bitmap_bit_p (ref->stored, aloop->num))
- && ref_indep_loop_p (aloop, ref))
- return aloop;
- if (ref_indep_loop_p (loop, ref))
- return loop;
- else
- return NULL;
- }
- /* If there is a simple load or store to a memory reference in STMT, returns
- the location of the memory reference, and sets IS_STORE according to whether
- it is a store or load. Otherwise, returns NULL. */
- static tree *
- simple_mem_ref_in_stmt (gimple stmt, bool *is_store)
- {
- tree *lhs, *rhs;
- /* Recognize SSA_NAME = MEM and MEM = (SSA_NAME | invariant) patterns. */
- if (!gimple_assign_single_p (stmt))
- return NULL;
- lhs = gimple_assign_lhs_ptr (stmt);
- rhs = gimple_assign_rhs1_ptr (stmt);
- if (TREE_CODE (*lhs) == SSA_NAME && gimple_vuse (stmt))
- {
- *is_store = false;
- return rhs;
- }
- else if (gimple_vdef (stmt)
- && (TREE_CODE (*rhs) == SSA_NAME || is_gimple_min_invariant (*rhs)))
- {
- *is_store = true;
- return lhs;
- }
- else
- return NULL;
- }
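- /* For illustration (sketch): with a global G and SSA names x_1, y_2,
- x_1 = G; <-- load: *IS_STORE = false, &rhs returned
- G = x_1; <-- store: *IS_STORE = true, &lhs returned
- x_1 = y_2 + 1; <-- not a single load/store: NULL returned */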
- /* Returns the memory reference contained in STMT. */
- static mem_ref_p
- mem_ref_in_stmt (gimple stmt)
- {
- bool store;
- tree *mem = simple_mem_ref_in_stmt (stmt, &store);
- hashval_t hash;
- mem_ref_p ref;
- if (!mem)
- return NULL;
- gcc_assert (!store);
- hash = iterative_hash_expr (*mem, 0);
- ref = memory_accesses.refs->find_with_hash (*mem, hash);
- gcc_assert (ref != NULL);
- return ref;
- }
- /* From a controlling predicate in DOM determine the arguments from
- the PHI node PHI that are chosen if the predicate evaluates to
- true and false and store them to *TRUE_ARG_P and *FALSE_ARG_P if
- they are non-NULL. Returns true if the arguments can be determined,
- otherwise returns false. */
- static bool
- extract_true_false_args_from_phi (basic_block dom, gphi *phi,
- tree *true_arg_p, tree *false_arg_p)
- {
- basic_block bb = gimple_bb (phi);
- edge true_edge, false_edge, tem;
- tree arg0 = NULL_TREE, arg1 = NULL_TREE;
- /* We have to verify that one edge into the PHI node is dominated
- by the true edge of the predicate block and the other edge
- dominated by the false edge. This ensures that the PHI argument
- we are going to take is completely determined by the path we
- take from the predicate block.
- We can only use BB dominance checks below if the destination of
- the true/false edges are dominated by their edge, thus only
- have a single predecessor. */
- extract_true_false_edges_from_block (dom, &true_edge, &false_edge);
- tem = EDGE_PRED (bb, 0);
- if (tem == true_edge
- || (single_pred_p (true_edge->dest)
- && (tem->src == true_edge->dest
- || dominated_by_p (CDI_DOMINATORS,
- tem->src, true_edge->dest))))
- arg0 = PHI_ARG_DEF (phi, tem->dest_idx);
- else if (tem == false_edge
- || (single_pred_p (false_edge->dest)
- && (tem->src == false_edge->dest
- || dominated_by_p (CDI_DOMINATORS,
- tem->src, false_edge->dest))))
- arg1 = PHI_ARG_DEF (phi, tem->dest_idx);
- else
- return false;
- tem = EDGE_PRED (bb, 1);
- if (tem == true_edge
- || (single_pred_p (true_edge->dest)
- && (tem->src == true_edge->dest
- || dominated_by_p (CDI_DOMINATORS,
- tem->src, true_edge->dest))))
- arg0 = PHI_ARG_DEF (phi, tem->dest_idx);
- else if (tem == false_edge
- || (single_pred_p (false_edge->dest)
- && (tem->src == false_edge->dest
- || dominated_by_p (CDI_DOMINATORS,
- tem->src, false_edge->dest))))
- arg1 = PHI_ARG_DEF (phi, tem->dest_idx);
- else
- return false;
- if (!arg0 || !arg1)
- return false;
- if (true_arg_p)
- *true_arg_p = arg0;
- if (false_arg_p)
- *false_arg_p = arg1;
- return true;
- }
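- /* For illustration (sketch): in the diamond
- if (cond) <-- DOM, ending in this GIMPLE_COND
- / \
- bb_t bb_f
- \ /
- x = PHI <a (bb_t), b (bb_f)>
- the function stores a to *TRUE_ARG_P and b to *FALSE_ARG_P, provided each
- incoming PHI edge is dominated by the corresponding edge out of DOM. */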
- /* Determine the outermost loop to which it is possible to hoist a statement
- STMT and store it to LIM_DATA (STMT)->max_loop. To do this we determine
- the outermost loop in which the value computed by STMT is invariant.
- If MUST_PRESERVE_EXEC is true, additionally choose such a loop that
- we preserve the fact whether STMT is executed. It also fills other related
- information into LIM_DATA (STMT).
- The function returns false if STMT cannot be hoisted outside of the loop it
- is defined in, and true otherwise. */
- static bool
- determine_max_movement (gimple stmt, bool must_preserve_exec)
- {
- basic_block bb = gimple_bb (stmt);
- struct loop *loop = bb->loop_father;
- struct loop *level;
- struct lim_aux_data *lim_data = get_lim_data (stmt);
- tree val;
- ssa_op_iter iter;
- if (must_preserve_exec)
- level = ALWAYS_EXECUTED_IN (bb);
- else
- level = superloop_at_depth (loop, 1);
- lim_data->max_loop = level;
- if (gphi *phi = dyn_cast <gphi *> (stmt))
- {
- use_operand_p use_p;
- unsigned min_cost = UINT_MAX;
- unsigned total_cost = 0;
- struct lim_aux_data *def_data;
- /* We will end up promoting dependencies to be unconditionally
- evaluated. For this reason the PHI cost (and thus the
- cost we remove from the loop by doing the invariant motion)
- is that of the cheapest PHI argument dependency chain. */
- FOR_EACH_PHI_ARG (use_p, phi, iter, SSA_OP_USE)
- {
- val = USE_FROM_PTR (use_p);
- if (TREE_CODE (val) != SSA_NAME)
- {
- /* Assign const 1 to constants. */
- min_cost = MIN (min_cost, 1);
- total_cost += 1;
- continue;
- }
- if (!add_dependency (val, lim_data, loop, false))
- return false;
- gimple def_stmt = SSA_NAME_DEF_STMT (val);
- if (gimple_bb (def_stmt)
- && gimple_bb (def_stmt)->loop_father == loop)
- {
- def_data = get_lim_data (def_stmt);
- if (def_data)
- {
- min_cost = MIN (min_cost, def_data->cost);
- total_cost += def_data->cost;
- }
- }
- }
- min_cost = MIN (min_cost, total_cost);
- lim_data->cost += min_cost;
- if (gimple_phi_num_args (phi) > 1)
- {
- basic_block dom = get_immediate_dominator (CDI_DOMINATORS, bb);
- gimple cond;
- if (gsi_end_p (gsi_last_bb (dom)))
- return false;
- cond = gsi_stmt (gsi_last_bb (dom));
- if (gimple_code (cond) != GIMPLE_COND)
- return false;
- /* Verify that this is an extended form of a diamond and
- the PHI arguments are completely controlled by the
- predicate in DOM. */
- if (!extract_true_false_args_from_phi (dom, phi, NULL, NULL))
- return false;
- /* Fold in dependencies and cost of the condition. */
- FOR_EACH_SSA_TREE_OPERAND (val, cond, iter, SSA_OP_USE)
- {
- if (!add_dependency (val, lim_data, loop, false))
- return false;
- def_data = get_lim_data (SSA_NAME_DEF_STMT (val));
- if (def_data)
- total_cost += def_data->cost;
- }
- /* We want to avoid unconditionally executing very expensive
- operations. As costs for our dependencies cannot be
- negative, just claim we are not invariant for this case.
- We also are not sure whether the control-flow inside the
- loop will vanish. */
- if (total_cost - min_cost >= 2 * LIM_EXPENSIVE
- && !(min_cost != 0
- && total_cost / min_cost <= 2))
- return false;
- /* Assume that the control-flow in the loop will vanish.
- ??? We should verify this and not artificially increase
- the cost if that is not the case. */
- lim_data->cost += stmt_cost (stmt);
- }
- return true;
- }
- else
- FOR_EACH_SSA_TREE_OPERAND (val, stmt, iter, SSA_OP_USE)
- if (!add_dependency (val, lim_data, loop, true))
- return false;
- if (gimple_vuse (stmt))
- {
- mem_ref_p ref = mem_ref_in_stmt (stmt);
- if (ref)
- {
- lim_data->max_loop
- = outermost_indep_loop (lim_data->max_loop, loop, ref);
- if (!lim_data->max_loop)
- return false;
- }
- else
- {
- if ((val = gimple_vuse (stmt)) != NULL_TREE)
- {
- if (!add_dependency (val, lim_data, loop, false))
- return false;
- }
- }
- }
- lim_data->cost += stmt_cost (stmt);
- return true;
- }
- /* Suppose that some statement in ORIG_LOOP is hoisted to the loop LEVEL,
- and that one of the operands of this statement is computed by STMT.
- Ensure that STMT (together with all the statements that define its
- operands) is hoisted at least out of the loop LEVEL. */
- static void
- set_level (gimple stmt, struct loop *orig_loop, struct loop *level)
- {
- struct loop *stmt_loop = gimple_bb (stmt)->loop_father;
- struct lim_aux_data *lim_data;
- gimple dep_stmt;
- unsigned i;
- stmt_loop = find_common_loop (orig_loop, stmt_loop);
- lim_data = get_lim_data (stmt);
- if (lim_data != NULL && lim_data->tgt_loop != NULL)
- stmt_loop = find_common_loop (stmt_loop,
- loop_outer (lim_data->tgt_loop));
- if (flow_loop_nested_p (stmt_loop, level))
- return;
- gcc_assert (level == lim_data->max_loop
- || flow_loop_nested_p (lim_data->max_loop, level));
- lim_data->tgt_loop = level;
- FOR_EACH_VEC_ELT (lim_data->depends, i, dep_stmt)
- set_level (dep_stmt, orig_loop, level);
- }
- /* Determines an outermost loop from which we want to hoist the statement STMT.
- For now we choose the outermost possible loop. TODO -- use profiling
- information to set it more sanely. */
- static void
- set_profitable_level (gimple stmt)
- {
- set_level (stmt, gimple_bb (stmt)->loop_father, get_lim_data (stmt)->max_loop);
- }
- /* Returns true if STMT is a call that has side effects. */
- static bool
- nonpure_call_p (gimple stmt)
- {
- if (gimple_code (stmt) != GIMPLE_CALL)
- return false;
- return gimple_has_side_effects (stmt);
- }
- /* Rewrite a/b to a*(1/b). Return the invariant stmt to process. */
- static gimple
- rewrite_reciprocal (gimple_stmt_iterator *bsi)
- {
- gassign *stmt, *stmt1, *stmt2;
- tree name, lhs, type;
- tree real_one;
- gimple_stmt_iterator gsi;
- stmt = as_a <gassign *> (gsi_stmt (*bsi));
- lhs = gimple_assign_lhs (stmt);
- type = TREE_TYPE (lhs);
- real_one = build_one_cst (type);
- name = make_temp_ssa_name (type, NULL, "reciptmp");
- stmt1 = gimple_build_assign (name, RDIV_EXPR, real_one,
- gimple_assign_rhs2 (stmt));
- stmt2 = gimple_build_assign (lhs, MULT_EXPR, name,
- gimple_assign_rhs1 (stmt));
- /* Replace division stmt with reciprocal and multiply stmts.
- The multiply stmt is not invariant, so update iterator
- and avoid rescanning. */
- gsi = *bsi;
- gsi_insert_before (bsi, stmt1, GSI_NEW_STMT);
- gsi_replace (&gsi, stmt2, true);
- /* Continue processing with invariant reciprocal statement. */
- return stmt1;
- }
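- /* For illustration (sketch): with loop-invariant b,
- x = a / b;
- becomes
- reciptmp = 1 / b; <-- invariant; returned for further processing
- x = a * reciptmp; <-- replaces the division in place
- so only the expensive division is hoisted. */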
- /* Check if the pattern at *BSI is a bittest of the form
- (A >> B) & 1 != 0 and in this case rewrite it to A & (1 << B) != 0. */
- static gimple
- rewrite_bittest (gimple_stmt_iterator *bsi)
- {
- gassign *stmt;
- gimple stmt1;
- gassign *stmt2;
- gimple use_stmt;
- gcond *cond_stmt;
- tree lhs, name, t, a, b;
- use_operand_p use;
- stmt = as_a <gassign *> (gsi_stmt (*bsi));
- lhs = gimple_assign_lhs (stmt);
- /* Verify that the single use of lhs is a comparison against zero. */
- if (TREE_CODE (lhs) != SSA_NAME
- || !single_imm_use (lhs, &use, &use_stmt))
- return stmt;
- cond_stmt = dyn_cast <gcond *> (use_stmt);
- if (!cond_stmt)
- return stmt;
- if (gimple_cond_lhs (cond_stmt) != lhs
- || (gimple_cond_code (cond_stmt) != NE_EXPR
- && gimple_cond_code (cond_stmt) != EQ_EXPR)
- || !integer_zerop (gimple_cond_rhs (cond_stmt)))
- return stmt;
- /* Get at the operands of the shift. The rhs is TMP1 & 1. */
- stmt1 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
- if (gimple_code (stmt1) != GIMPLE_ASSIGN)
- return stmt;
- /* There is a conversion in between, possibly inserted by fold. */
- if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt1)))
- {
- t = gimple_assign_rhs1 (stmt1);
- if (TREE_CODE (t) != SSA_NAME
- || !has_single_use (t))
- return stmt;
- stmt1 = SSA_NAME_DEF_STMT (t);
- if (gimple_code (stmt1) != GIMPLE_ASSIGN)
- return stmt;
- }
- /* Verify that B is loop invariant but A is not. Verify that with
- all the stmt walking we are still in the same loop. */
- if (gimple_assign_rhs_code (stmt1) != RSHIFT_EXPR
- || loop_containing_stmt (stmt1) != loop_containing_stmt (stmt))
- return stmt;
- a = gimple_assign_rhs1 (stmt1);
- b = gimple_assign_rhs2 (stmt1);
- if (outermost_invariant_loop (b, loop_containing_stmt (stmt1)) != NULL
- && outermost_invariant_loop (a, loop_containing_stmt (stmt1)) == NULL)
- {
- gimple_stmt_iterator rsi;
- /* 1 << B */
- t = fold_build2 (LSHIFT_EXPR, TREE_TYPE (a),
- build_int_cst (TREE_TYPE (a), 1), b);
- name = make_temp_ssa_name (TREE_TYPE (a), NULL, "shifttmp");
- stmt1 = gimple_build_assign (name, t);
- /* A & (1 << B) */
- t = fold_build2 (BIT_AND_EXPR, TREE_TYPE (a), a, name);
- name = make_temp_ssa_name (TREE_TYPE (a), NULL, "shifttmp");
- stmt2 = gimple_build_assign (name, t);
- /* Replace the SSA_NAME we compare against zero. Adjust
- the type of zero accordingly. */
- SET_USE (use, name);
- gimple_cond_set_rhs (cond_stmt,
- build_int_cst_type (TREE_TYPE (name),
- 0));
- /* Don't use gsi_replace here, none of the new assignments sets
- the variable originally set in stmt. Move bsi to stmt1, and
- then remove the original stmt, so that we get a chance to
- retain debug info for it. */
- rsi = *bsi;
- gsi_insert_before (bsi, stmt1, GSI_NEW_STMT);
- gsi_insert_before (&rsi, stmt2, GSI_SAME_STMT);
- gsi_remove (&rsi, true);
- return stmt1;
- }
- return stmt;
- }
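- /* For illustration (sketch): with B invariant in the loop and A not,
- T1 = A >> B;
- T2 = T1 & 1;
- if (T2 != 0) ...
- becomes
- shifttmp1 = 1 << B; <-- invariant; returned for further processing
- shifttmp2 = A & shifttmp1;
- if (shifttmp2 != 0) ...
- so the shift by the invariant amount can be hoisted. */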
- /* For each statement determines the outermost loop in which it is invariant,
- the statements on whose motion it depends and the cost of the computation.
- This information is stored to the LIM_DATA structure associated with
- each statement. */
- class invariantness_dom_walker : public dom_walker
- {
- public:
- invariantness_dom_walker (cdi_direction direction)
- : dom_walker (direction) {}
- virtual void before_dom_children (basic_block);
- };
- /* Determine the outermost loops in which statements in basic block BB are
- invariant, and record them to the LIM_DATA associated with the statements.
- Callback for dom_walker. */
- void
- invariantness_dom_walker::before_dom_children (basic_block bb)
- {
- enum move_pos pos;
- gimple_stmt_iterator bsi;
- gimple stmt;
- bool maybe_never = ALWAYS_EXECUTED_IN (bb) == NULL;
- struct loop *outermost = ALWAYS_EXECUTED_IN (bb);
- struct lim_aux_data *lim_data;
- if (!loop_outer (bb->loop_father))
- return;
- if (dump_file && (dump_flags & TDF_DETAILS))
- fprintf (dump_file, "Basic block %d (loop %d -- depth %d):\n\n",
- bb->index, bb->loop_father->num, loop_depth (bb->loop_father));
- /* Look at PHI nodes, but only if there are at most two.
- ??? We could relax this further by post-processing the inserted
- code and transforming adjacent cond-exprs with the same predicate
- to control flow again. */
- bsi = gsi_start_phis (bb);
- if (!gsi_end_p (bsi)
- && ((gsi_next (&bsi), gsi_end_p (bsi))
- || (gsi_next (&bsi), gsi_end_p (bsi))))
- for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); gsi_next (&bsi))
- {
- stmt = gsi_stmt (bsi);
- pos = movement_possibility (stmt);
- if (pos == MOVE_IMPOSSIBLE)
- continue;
- lim_data = init_lim_data (stmt);
- lim_data->always_executed_in = outermost;
- if (!determine_max_movement (stmt, false))
- {
- lim_data->max_loop = NULL;
- continue;
- }
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- print_gimple_stmt (dump_file, stmt, 2, 0);
- fprintf (dump_file, " invariant up to level %d, cost %d.\n\n",
- loop_depth (lim_data->max_loop),
- lim_data->cost);
- }
- if (lim_data->cost >= LIM_EXPENSIVE)
- set_profitable_level (stmt);
- }
- for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
- {
- stmt = gsi_stmt (bsi);
- pos = movement_possibility (stmt);
- if (pos == MOVE_IMPOSSIBLE)
- {
- if (nonpure_call_p (stmt))
- {
- maybe_never = true;
- outermost = NULL;
- }
- /* Make sure to note always_executed_in for stores to make
- store-motion work. */
- else if (stmt_makes_single_store (stmt))
- {
- struct lim_aux_data *lim_data = init_lim_data (stmt);
- lim_data->always_executed_in = outermost;
- }
- continue;
- }
- if (is_gimple_assign (stmt)
- && (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))
- == GIMPLE_BINARY_RHS))
- {
- tree op0 = gimple_assign_rhs1 (stmt);
- tree op1 = gimple_assign_rhs2 (stmt);
- struct loop *ol1 = outermost_invariant_loop (op1,
- loop_containing_stmt (stmt));
- /* If the divisor is invariant, convert a/b to a*(1/b), allowing the
- reciprocal to be hoisted out of the loop, saving an expensive divide. */
- if (pos == MOVE_POSSIBLE
- && gimple_assign_rhs_code (stmt) == RDIV_EXPR
- && flag_unsafe_math_optimizations
- && !flag_trapping_math
- && ol1 != NULL
- && outermost_invariant_loop (op0, ol1) == NULL)
- stmt = rewrite_reciprocal (&bsi);
- /* If the shift count is invariant, convert (A >> B) & 1 to
- A & (1 << B), allowing the bit mask to be hoisted out of the loop,
- saving an expensive shift. */
- if (pos == MOVE_POSSIBLE
- && gimple_assign_rhs_code (stmt) == BIT_AND_EXPR
- && integer_onep (op1)
- && TREE_CODE (op0) == SSA_NAME
- && has_single_use (op0))
- stmt = rewrite_bittest (&bsi);
- }
- lim_data = init_lim_data (stmt);
- lim_data->always_executed_in = outermost;
- if (maybe_never && pos == MOVE_PRESERVE_EXECUTION)
- continue;
- if (!determine_max_movement (stmt, pos == MOVE_PRESERVE_EXECUTION))
- {
- lim_data->max_loop = NULL;
- continue;
- }
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- print_gimple_stmt (dump_file, stmt, 2, 0);
- fprintf (dump_file, " invariant up to level %d, cost %d.\n\n",
- loop_depth (lim_data->max_loop),
- lim_data->cost);
- }
- if (lim_data->cost >= LIM_EXPENSIVE)
- set_profitable_level (stmt);
- }
- }
- class move_computations_dom_walker : public dom_walker
- {
- public:
- move_computations_dom_walker (cdi_direction direction)
- : dom_walker (direction), todo_ (0) {}
- virtual void before_dom_children (basic_block);
- unsigned int todo_;
- };
- /* Hoist the statements in basic block BB out of the loops prescribed by
- data stored in LIM_DATA structures associated with each statement. Callback
- for walk_dominator_tree. */
- void
- move_computations_dom_walker::before_dom_children (basic_block bb)
- {
- struct loop *level;
- unsigned cost = 0;
- struct lim_aux_data *lim_data;
- if (!loop_outer (bb->loop_father))
- return;
- for (gphi_iterator bsi = gsi_start_phis (bb); !gsi_end_p (bsi); )
- {
- gassign *new_stmt;
- gphi *stmt = bsi.phi ();
- lim_data = get_lim_data (stmt);
- if (lim_data == NULL)
- {
- gsi_next (&bsi);
- continue;
- }
- cost = lim_data->cost;
- level = lim_data->tgt_loop;
- clear_lim_data (stmt);
- if (!level)
- {
- gsi_next (&bsi);
- continue;
- }
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "Moving PHI node\n");
- print_gimple_stmt (dump_file, stmt, 0, 0);
- fprintf (dump_file, "(cost %u) out of loop %d.\n\n",
- cost, level->num);
- }
- if (gimple_phi_num_args (stmt) == 1)
- {
- tree arg = PHI_ARG_DEF (stmt, 0);
- new_stmt = gimple_build_assign (gimple_phi_result (stmt),
- TREE_CODE (arg), arg);
- }
- else
- {
- basic_block dom = get_immediate_dominator (CDI_DOMINATORS, bb);
- gimple cond = gsi_stmt (gsi_last_bb (dom));
- tree arg0 = NULL_TREE, arg1 = NULL_TREE, t;
- /* Get the PHI arguments corresponding to the true and false
- edges of COND. */
- extract_true_false_args_from_phi (dom, stmt, &arg0, &arg1);
- gcc_assert (arg0 && arg1);
- t = build2 (gimple_cond_code (cond), boolean_type_node,
- gimple_cond_lhs (cond), gimple_cond_rhs (cond));
- new_stmt = gimple_build_assign (gimple_phi_result (stmt),
- COND_EXPR, t, arg0, arg1);
- todo_ |= TODO_cleanup_cfg;
- }
- if (INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_lhs (new_stmt)))
- && (!ALWAYS_EXECUTED_IN (bb)
- || (ALWAYS_EXECUTED_IN (bb) != level
- && !flow_loop_nested_p (ALWAYS_EXECUTED_IN (bb), level))))
- {
- tree lhs = gimple_assign_lhs (new_stmt);
- SSA_NAME_RANGE_INFO (lhs) = NULL;
- SSA_NAME_ANTI_RANGE_P (lhs) = 0;
- }
- gsi_insert_on_edge (loop_preheader_edge (level), new_stmt);
- remove_phi_node (&bsi, false);
- }
- for (gimple_stmt_iterator bsi = gsi_start_bb (bb); !gsi_end_p (bsi); )
- {
- edge e;
- gimple stmt = gsi_stmt (bsi);
- lim_data = get_lim_data (stmt);
- if (lim_data == NULL)
- {
- gsi_next (&bsi);
- continue;
- }
- cost = lim_data->cost;
- level = lim_data->tgt_loop;
- clear_lim_data (stmt);
- if (!level)
- {
- gsi_next (&bsi);
- continue;
- }
- /* We do not really want to move conditionals out of the loop; we just
- placed them here to force their operands to be moved if necessary. */
- if (gimple_code (stmt) == GIMPLE_COND)
- continue;
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "Moving statement\n");
- print_gimple_stmt (dump_file, stmt, 0, 0);
- fprintf (dump_file, "(cost %u) out of loop %d.\n\n",
- cost, level->num);
- }
- e = loop_preheader_edge (level);
- gcc_assert (!gimple_vdef (stmt));
- if (gimple_vuse (stmt))
- {
- /* The new VUSE is the one from the virtual PHI in the loop
- header or the one already present. */
- gphi_iterator gsi2;
- for (gsi2 = gsi_start_phis (e->dest);
- !gsi_end_p (gsi2); gsi_next (&gsi2))
- {
- gphi *phi = gsi2.phi ();
- if (virtual_operand_p (gimple_phi_result (phi)))
- {
- gimple_set_vuse (stmt, PHI_ARG_DEF_FROM_EDGE (phi, e));
- break;
- }
- }
- }
- gsi_remove (&bsi, false);
- if (gimple_has_lhs (stmt)
- && TREE_CODE (gimple_get_lhs (stmt)) == SSA_NAME
- && INTEGRAL_TYPE_P (TREE_TYPE (gimple_get_lhs (stmt)))
- && (!ALWAYS_EXECUTED_IN (bb)
- || !(ALWAYS_EXECUTED_IN (bb) == level
- || flow_loop_nested_p (ALWAYS_EXECUTED_IN (bb), level))))
- {
- tree lhs = gimple_get_lhs (stmt);
- SSA_NAME_RANGE_INFO (lhs) = NULL;
- SSA_NAME_ANTI_RANGE_P (lhs) = 0;
- }
- /* In case this is a stmt that is not unconditionally executed
- when the target loop header is executed and the stmt may
- invoke undefined integer or pointer overflow, rewrite it to
- unsigned arithmetic. */
- if (is_gimple_assign (stmt)
- && INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_lhs (stmt)))
- && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (gimple_assign_lhs (stmt)))
- && arith_code_with_undefined_signed_overflow
- (gimple_assign_rhs_code (stmt))
- && (!ALWAYS_EXECUTED_IN (bb)
- || !(ALWAYS_EXECUTED_IN (bb) == level
- || flow_loop_nested_p (ALWAYS_EXECUTED_IN (bb), level))))
- gsi_insert_seq_on_edge (e, rewrite_to_defined_overflow (stmt));
- else
- gsi_insert_on_edge (e, stmt);
- }
- }
- /* Hoist the statements out of the loops prescribed by data stored in
- LIM_DATA structures associated with each statement. */
- static unsigned int
- move_computations (void)
- {
- move_computations_dom_walker walker (CDI_DOMINATORS);
- walker.walk (cfun->cfg->x_entry_block_ptr);
- gsi_commit_edge_inserts ();
- if (need_ssa_update_p (cfun))
- rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
- return walker.todo_;
- }
- /* Checks whether the statement defining variable *INDEX can be hoisted
- out of the loop passed in DATA. Callback for for_each_index. */
- static bool
- may_move_till (tree ref, tree *index, void *data)
- {
- struct loop *loop = (struct loop *) data, *max_loop;
- /* If REF is an array reference, check also that the step and the lower
- bound are invariant in LOOP. */
- if (TREE_CODE (ref) == ARRAY_REF)
- {
- tree step = TREE_OPERAND (ref, 3);
- tree lbound = TREE_OPERAND (ref, 2);
- max_loop = outermost_invariant_loop (step, loop);
- if (!max_loop)
- return false;
- max_loop = outermost_invariant_loop (lbound, loop);
- if (!max_loop)
- return false;
- }
- max_loop = outermost_invariant_loop (*index, loop);
- if (!max_loop)
- return false;
- return true;
- }
- /* If OP is an SSA_NAME, force the statement that defines it to be
- moved out of LOOP. ORIG_LOOP is the loop in which OP is used. */
- static void
- force_move_till_op (tree op, struct loop *orig_loop, struct loop *loop)
- {
- gimple stmt;
- if (!op
- || is_gimple_min_invariant (op))
- return;
- gcc_assert (TREE_CODE (op) == SSA_NAME);
- stmt = SSA_NAME_DEF_STMT (op);
- if (gimple_nop_p (stmt))
- return;
- set_level (stmt, orig_loop, loop);
- }
- /* Forces statements defining invariants in REF (and *INDEX) to be moved out
- of LOOP. The reference REF is used in the loop ORIG_LOOP. Callback for
- for_each_index. */
- struct fmt_data
- {
- struct loop *loop;
- struct loop *orig_loop;
- };
- static bool
- force_move_till (tree ref, tree *index, void *data)
- {
- struct fmt_data *fmt_data = (struct fmt_data *) data;
- if (TREE_CODE (ref) == ARRAY_REF)
- {
- tree step = TREE_OPERAND (ref, 3);
- tree lbound = TREE_OPERAND (ref, 2);
- force_move_till_op (step, fmt_data->orig_loop, fmt_data->loop);
- force_move_till_op (lbound, fmt_data->orig_loop, fmt_data->loop);
- }
- force_move_till_op (*index, fmt_data->orig_loop, fmt_data->loop);
- return true;
- }
- /* A function to free the mem_ref object OBJ. */
- static void
- memref_free (struct im_mem_ref *mem)
- {
- mem->accesses_in_loop.release ();
- }
- /* Allocates and returns a memory reference description for MEM whose hash
- value is HASH and id is ID. */
- static mem_ref_p
- mem_ref_alloc (tree mem, unsigned hash, unsigned id)
- {
- mem_ref_p ref = XOBNEW (&mem_ref_obstack, struct im_mem_ref);
- ao_ref_init (&ref->mem, mem);
- ref->id = id;
- ref->hash = hash;
- ref->stored = NULL;
- bitmap_initialize (&ref->indep_loop, &lim_bitmap_obstack);
- bitmap_initialize (&ref->dep_loop, &lim_bitmap_obstack);
- ref->accesses_in_loop.create (1);
- return ref;
- }
- /* Records memory reference location *LOC to the memory reference
- description REF. The reference occurs in statement STMT. */
- static void
- record_mem_ref_loc (mem_ref_p ref, gimple stmt, tree *loc)
- {
- mem_ref_loc aref;
- aref.stmt = stmt;
- aref.ref = loc;
- ref->accesses_in_loop.safe_push (aref);
- }
- /* Set the bit for LOOP in REF's stored bitmap, allocating the bitmap if
- necessary. Return whether the bit changed. */
- static bool
- set_ref_stored_in_loop (mem_ref_p ref, struct loop *loop)
- {
- if (!ref->stored)
- ref->stored = BITMAP_ALLOC (&lim_bitmap_obstack);
- return bitmap_set_bit (ref->stored, loop->num);
- }
- /* Marks reference REF as stored in LOOP. */
- static void
- mark_ref_stored (mem_ref_p ref, struct loop *loop)
- {
- while (loop != current_loops->tree_root
- && set_ref_stored_in_loop (ref, loop))
- loop = loop_outer (loop);
- }
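- /* For illustration (sketch): a store in the innermost loop of
- loop A { loop B { loop C { *p = x; } } }
- sets the bits of C, B and A in REF->stored. The walk stops as soon as
- set_ref_stored_in_loop reports an unchanged bit, since then all outer
- bits are already set as well. */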
- /* Gathers memory references in statement STMT in LOOP, storing the
- information about them in the memory_accesses structure. Marks
- the vops accessed through unrecognized statements there as
- well. */
- static void
- gather_mem_refs_stmt (struct loop *loop, gimple stmt)
- {
- tree *mem = NULL;
- hashval_t hash;
- im_mem_ref **slot;
- mem_ref_p ref;
- bool is_stored;
- unsigned id;
- if (!gimple_vuse (stmt))
- return;
- mem = simple_mem_ref_in_stmt (stmt, &is_stored);
- if (!mem)
- {
- /* We use the shared mem_ref for all unanalyzable refs. */
- id = UNANALYZABLE_MEM_ID;
- ref = memory_accesses.refs_list[id];
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "Unanalyzed memory reference %u: ", id);
- print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
- }
- is_stored = gimple_vdef (stmt);
- }
- else
- {
- hash = iterative_hash_expr (*mem, 0);
- slot = memory_accesses.refs->find_slot_with_hash (*mem, hash, INSERT);
- if (*slot)
- {
- ref = (mem_ref_p) *slot;
- id = ref->id;
- }
- else
- {
- id = memory_accesses.refs_list.length ();
- ref = mem_ref_alloc (*mem, hash, id);
- memory_accesses.refs_list.safe_push (ref);
- *slot = ref;
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "Memory reference %u: ", id);
- print_generic_expr (dump_file, ref->mem.ref, TDF_SLIM);
- fprintf (dump_file, "\n");
- }
- }
- record_mem_ref_loc (ref, stmt, mem);
- }
- bitmap_set_bit (&memory_accesses.refs_in_loop[loop->num], ref->id);
- if (is_stored)
- {
- bitmap_set_bit (&memory_accesses.refs_stored_in_loop[loop->num], ref->id);
- mark_ref_stored (ref, loop);
- }
- return;
- }
- static unsigned *bb_loop_postorder;
- /* qsort comparison function to sort blocks by their loop fathers' postorder. */
- static int
- sort_bbs_in_loop_postorder_cmp (const void *bb1_, const void *bb2_)
- {
- basic_block bb1 = *(basic_block *)const_cast<void *>(bb1_);
- basic_block bb2 = *(basic_block *)const_cast<void *>(bb2_);
- struct loop *loop1 = bb1->loop_father;
- struct loop *loop2 = bb2->loop_father;
- if (loop1->num == loop2->num)
- return 0;
- return bb_loop_postorder[loop1->num] < bb_loop_postorder[loop2->num] ? -1 : 1;
- }
- /* qsort comparison function to sort ref locs by their loop fathers' postorder. */
- static int
- sort_locs_in_loop_postorder_cmp (const void *loc1_, const void *loc2_)
- {
- mem_ref_loc *loc1 = (mem_ref_loc *)const_cast<void *>(loc1_);
- mem_ref_loc *loc2 = (mem_ref_loc *)const_cast<void *>(loc2_);
- struct loop *loop1 = gimple_bb (loc1->stmt)->loop_father;
- struct loop *loop2 = gimple_bb (loc2->stmt)->loop_father;
- if (loop1->num == loop2->num)
- return 0;
- return bb_loop_postorder[loop1->num] < bb_loop_postorder[loop2->num] ? -1 : 1;
- }
- /* Gathers memory references in loops. */
- static void
- analyze_memory_references (void)
- {
- gimple_stmt_iterator bsi;
- basic_block bb, *bbs;
- struct loop *loop, *outer;
- unsigned i, n;
- /* Collect all basic blocks in loops and sort them by their
- loops' postorder. */
- i = 0;
- bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS);
- FOR_EACH_BB_FN (bb, cfun)
- if (bb->loop_father != current_loops->tree_root)
- bbs[i++] = bb;
- n = i;
- qsort (bbs, n, sizeof (basic_block), sort_bbs_in_loop_postorder_cmp);
- /* Visit blocks in loop postorder and assign mem-ref IDs in that order.
- That results in better locality for all the bitmaps. */
- for (i = 0; i < n; ++i)
- {
- basic_block bb = bbs[i];
- for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
- gather_mem_refs_stmt (bb->loop_father, gsi_stmt (bsi));
- }
- /* Sort the location list of gathered memory references by their
- loop postorder number. */
- im_mem_ref *ref;
- FOR_EACH_VEC_ELT (memory_accesses.refs_list, i, ref)
- ref->accesses_in_loop.qsort (sort_locs_in_loop_postorder_cmp);
- free (bbs);
- // free (bb_loop_postorder);
- /* Propagate the information about accessed memory references up
- the loop hierarchy. */
- FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
- {
- /* Finalize the overall touched references (including subloops). */
- bitmap_ior_into (&memory_accesses.all_refs_stored_in_loop[loop->num],
- &memory_accesses.refs_stored_in_loop[loop->num]);
- /* Propagate the information about accessed memory references up
- the loop hierarchy. */
- outer = loop_outer (loop);
- if (outer == current_loops->tree_root)
- continue;
- bitmap_ior_into (&memory_accesses.all_refs_stored_in_loop[outer->num],
- &memory_accesses.all_refs_stored_in_loop[loop->num]);
- }
- }
- /* Returns true if MEM1 and MEM2 may alias. TTAE_CACHE is used as a cache in
- tree_to_aff_combination_expand. */
- static bool
- mem_refs_may_alias_p (mem_ref_p mem1, mem_ref_p mem2,
- hash_map<tree, name_expansion *> **ttae_cache)
- {
- /* Perform BASE + OFFSET analysis -- if MEM1 and MEM2 are based on the same
- object and their offsets differ in such a way that the locations cannot
- overlap, then they cannot alias. */
- widest_int size1, size2;
- aff_tree off1, off2;
- /* Perform basic offset and type-based disambiguation. */
- if (!refs_may_alias_p_1 (&mem1->mem, &mem2->mem, true))
- return false;
- /* The expansion of addresses may be a bit expensive, thus we only do
- the check at -O2 and higher optimization levels. */
- if (optimize < 2)
- return true;
- get_inner_reference_aff (mem1->mem.ref, &off1, &size1);
- get_inner_reference_aff (mem2->mem.ref, &off2, &size2);
- aff_combination_expand (&off1, ttae_cache);
- aff_combination_expand (&off2, ttae_cache);
- aff_combination_scale (&off1, -1);
- aff_combination_add (&off2, &off1);
- if (aff_comb_cannot_overlap_p (&off2, size1, size2))
- return false;
- return true;
- }
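- /* For illustration (sketch): for accesses a[1] and a[2] of the same array,
- the expanded affine offsets differ by a full element size, so
- aff_comb_cannot_overlap_p proves the locations disjoint and the
- references do not alias, even where type-based disambiguation
- alone could not tell. */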
- /* Compare function for bsearch searching for reference locations
- in a loop. */
- static int
- find_ref_loc_in_loop_cmp (const void *loop_, const void *loc_)
- {
- struct loop *loop = (struct loop *)const_cast<void *>(loop_);
- mem_ref_loc *loc = (mem_ref_loc *)const_cast<void *>(loc_);
- struct loop *loc_loop = gimple_bb (loc->stmt)->loop_father;
- if (loop->num == loc_loop->num
- || flow_loop_nested_p (loop, loc_loop))
- return 0;
- return (bb_loop_postorder[loop->num] < bb_loop_postorder[loc_loop->num]
- ? -1 : 1);
- }
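- /* Note: this comparator returns 0 for any location inside LOOP or its
- subloops, so the bsearch in for_all_locs_in_loop below may land anywhere
- inside the cluster; the caller then scans outward in both directions. */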
- /* Iterates over all locations of REF in LOOP and its subloops, calling
- fn.operator() with the location as argument. When that operator
- returns true, the iteration is stopped and true is returned.
- Otherwise false is returned. */
- template <typename FN>
- static bool
- for_all_locs_in_loop (struct loop *loop, mem_ref_p ref, FN fn)
- {
- unsigned i;
- mem_ref_loc_p loc;
- /* Search for the cluster of locs in the accesses_in_loop vector
- which is sorted by the postorder index of the loop father. */
- loc = ref->accesses_in_loop.bsearch (loop, find_ref_loc_in_loop_cmp);
- if (!loc)
- return false;
- /* We have found one location inside loop or its sub-loops. Iterate
- both forward and backward to cover the whole cluster. */
- i = loc - ref->accesses_in_loop.address ();
- while (i > 0)
- {
- --i;
- mem_ref_loc_p l = &ref->accesses_in_loop[i];
- if (!flow_bb_inside_loop_p (loop, gimple_bb (l->stmt)))
- break;
- if (fn (l))
- return true;
- }
- for (i = loc - ref->accesses_in_loop.address ();
- i < ref->accesses_in_loop.length (); ++i)
- {
- mem_ref_loc_p l = &ref->accesses_in_loop[i];
- if (!flow_bb_inside_loop_p (loop, gimple_bb (l->stmt)))
- break;
- if (fn (l))
- return true;
- }
- return false;
- }
- /* Rewrites the memory reference at location LOC to TMP_VAR. */
- struct rewrite_mem_ref_loc
- {
- rewrite_mem_ref_loc (tree tmp_var_) : tmp_var (tmp_var_) {}
- bool operator () (mem_ref_loc_p loc);
- tree tmp_var;
- };
- bool
- rewrite_mem_ref_loc::operator () (mem_ref_loc_p loc)
- {
- *loc->ref = tmp_var;
- update_stmt (loc->stmt);
- return false;
- }
- /* Rewrites all references to REF in LOOP by variable TMP_VAR. */
- static void
- rewrite_mem_refs (struct loop *loop, mem_ref_p ref, tree tmp_var)
- {
- for_all_locs_in_loop (loop, ref, rewrite_mem_ref_loc (tmp_var));
- }
- /* Stores the first reference location in LOCP. */
- struct first_mem_ref_loc_1
- {
- first_mem_ref_loc_1 (mem_ref_loc_p *locp_) : locp (locp_) {}
- bool operator () (mem_ref_loc_p loc);
- mem_ref_loc_p *locp;
- };
- bool
- first_mem_ref_loc_1::operator () (mem_ref_loc_p loc)
- {
- *locp = loc;
- return true;
- }
- /* Returns the first reference location to REF in LOOP. */
- static mem_ref_loc_p
- first_mem_ref_loc (struct loop *loop, mem_ref_p ref)
- {
- mem_ref_loc_p locp = NULL;
- for_all_locs_in_loop (loop, ref, first_mem_ref_loc_1 (&locp));
- return locp;
- }
- struct prev_flag_edges {
- /* Edge to insert new flag comparison code. */
- edge append_cond_position;
- /* Edge for fall through from previous flag comparison. */
- edge last_cond_fallthru;
- };
- /* Helper function for execute_sm. Emit code to store TMP_VAR into
- MEM along edge EX.
- The store is only done if MEM has changed. We do this so no
- changes to MEM occur on code paths that did not originally store
- into it.
- The common case for execute_sm will transform:
- for (...) {
- if (foo)
- stuff;
- else
- MEM = TMP_VAR;
- }
- into:
- lsm = MEM;
- for (...) {
- if (foo)
- stuff;
- else
- lsm = TMP_VAR;
- }
- MEM = lsm;
- This function will generate:
- lsm = MEM;
- lsm_flag = false;
- ...
- for (...) {
- if (foo)
- stuff;
- else {
- lsm = TMP_VAR;
- lsm_flag = true;
- }
- }
- if (lsm_flag) <--
- MEM = lsm; <--
- */
- static void
- execute_sm_if_changed (edge ex, tree mem, tree tmp_var, tree flag)
- {
- basic_block new_bb, then_bb, old_dest;
- bool loop_has_only_one_exit;
- edge then_old_edge, orig_ex = ex;
- gimple_stmt_iterator gsi;
- gimple stmt;
- struct prev_flag_edges *prev_edges = (struct prev_flag_edges *) ex->aux;
- bool irr = ex->flags & EDGE_IRREDUCIBLE_LOOP;
- /* ?? Insert store after previous store if applicable. See note
- below. */
- if (prev_edges)
- ex = prev_edges->append_cond_position;
- loop_has_only_one_exit = single_pred_p (ex->dest);
- if (loop_has_only_one_exit)
- ex = split_block_after_labels (ex->dest);
- old_dest = ex->dest;
- new_bb = split_edge (ex);
- then_bb = create_empty_bb (new_bb);
- if (irr)
- then_bb->flags = BB_IRREDUCIBLE_LOOP;
- add_bb_to_loop (then_bb, new_bb->loop_father);
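- /* Emit the flag test into NEW_BB: if (lsm_flag != false) branch
- to THEN_BB. */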
- gsi = gsi_start_bb (new_bb);
- stmt = gimple_build_cond (NE_EXPR, flag, boolean_false_node,
- NULL_TREE, NULL_TREE);
- gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
- gsi = gsi_start_bb (then_bb);
- /* Insert actual store. */
- stmt = gimple_build_assign (unshare_expr (mem), tmp_var);
- gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
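- /* Wire up the diamond: NEW_BB branches to THEN_BB when the flag is
- set and to OLD_DEST otherwise; THEN_BB falls through to OLD_DEST. */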
- make_edge (new_bb, then_bb,
- EDGE_TRUE_VALUE | (irr ? EDGE_IRREDUCIBLE_LOOP : 0));
- make_edge (new_bb, old_dest,
- EDGE_FALSE_VALUE | (irr ? EDGE_IRREDUCIBLE_LOOP : 0));
- then_old_edge = make_edge (then_bb, old_dest,
- EDGE_FALLTHRU | (irr ? EDGE_IRREDUCIBLE_LOOP : 0));
- set_immediate_dominator (CDI_DOMINATORS, then_bb, new_bb);
- if (prev_edges)
- {
- basic_block prevbb = prev_edges->last_cond_fallthru->src;
- redirect_edge_succ (prev_edges->last_cond_fallthru, new_bb);
- set_immediate_dominator (CDI_DOMINATORS, new_bb, prevbb);
- set_immediate_dominator (CDI_DOMINATORS, old_dest,
- recompute_dominator (CDI_DOMINATORS, old_dest));
- }
- /* ?? Because stores may alias, they must happen in the exact
- sequence they originally happened. Save the position right after
- the (_lsm) store we just created so we can continue appending after
- it and maintain the original order. */
- {
- struct prev_flag_edges *p;
- if (orig_ex->aux)
- orig_ex->aux = NULL;
- alloc_aux_for_edge (orig_ex, sizeof (struct prev_flag_edges));
- p = (struct prev_flag_edges *) orig_ex->aux;
- p->append_cond_position = then_old_edge;
- p->last_cond_fallthru = find_edge (new_bb, old_dest);
- orig_ex->aux = (void *) p;
- }
- if (!loop_has_only_one_exit)
- for (gphi_iterator gpi = gsi_start_phis (old_dest);
- !gsi_end_p (gpi); gsi_next (&gpi))
- {
- gphi *phi = gpi.phi ();
- unsigned i;
- for (i = 0; i < gimple_phi_num_args (phi); i++)
- if (gimple_phi_arg_edge (phi, i)->src == new_bb)
- {
- tree arg = gimple_phi_arg_def (phi, i);
- add_phi_arg (phi, arg, then_old_edge, UNKNOWN_LOCATION);
- update_stmt (phi);
- }
- }
- /* Remove the original fall through edge. This was the
- single_succ_edge (new_bb). */
- EDGE_SUCC (new_bb, 0)->flags &= ~EDGE_FALLTHRU;
- }
- /* When REF is stored at the location, set a flag indicating the store. */
- struct sm_set_flag_if_changed
- {
- sm_set_flag_if_changed (tree flag_) : flag (flag_) {}
- bool operator () (mem_ref_loc_p loc);
- tree flag;
- };
- bool
- sm_set_flag_if_changed::operator () (mem_ref_loc_p loc)
- {
- /* Only set the flag for writes. */
- if (is_gimple_assign (loc->stmt)
- && gimple_assign_lhs_ptr (loc->stmt) == loc->ref)
- {
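- /* Emit FLAG = true immediately after the store statement. */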
- gimple_stmt_iterator gsi = gsi_for_stmt (loc->stmt);
- gimple stmt = gimple_build_assign (flag, boolean_true_node);
- gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
- }
- return false;
- }
- /* Helper function for execute_sm. On every location where REF is
- stored, set an appropriate flag indicating the store. */
- static tree
- execute_sm_if_changed_flag_set (struct loop *loop, mem_ref_p ref)
- {
- tree flag;
- char *str = get_lsm_tmp_name (ref->mem.ref, ~0, "_flag");
- flag = create_tmp_reg (boolean_type_node, str);
- for_all_locs_in_loop (loop, ref, sm_set_flag_if_changed (flag));
- return flag;
- }
- /* Executes store motion of memory reference REF from LOOP.
- Exits from the LOOP are stored in EXITS. The initialization of the
- temporary variable is placed in the preheader of the loop, and the
- assignments to the reference from the temporary variable are emitted
- on the exits. */
- static void
- execute_sm (struct loop *loop, vec<edge> exits, mem_ref_p ref)
- {
- tree tmp_var, store_flag = NULL_TREE;
- unsigned i;
- gassign *load;
- struct fmt_data fmt_data;
- edge ex;
- struct lim_aux_data *lim_data;
- bool multi_threaded_model_p = false;
- gimple_stmt_iterator gsi;
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "Executing store motion of ");
- print_generic_expr (dump_file, ref->mem.ref, 0);
- fprintf (dump_file, " from loop %d\n", loop->num);
- }
- tmp_var = create_tmp_reg (TREE_TYPE (ref->mem.ref),
- get_lsm_tmp_name (ref->mem.ref, ~0));
- fmt_data.loop = loop;
- fmt_data.orig_loop = loop;
- for_each_index (&ref->mem.ref, force_move_till, &fmt_data);
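- /* Use the flag-guarded store variant inside transactions or when
- store data races must not be introduced. */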
- if (bb_in_transaction (loop_preheader_edge (loop)->src)
- || !PARAM_VALUE (PARAM_ALLOW_STORE_DATA_RACES))
- multi_threaded_model_p = true;
- if (multi_threaded_model_p)
- store_flag = execute_sm_if_changed_flag_set (loop, ref);
- rewrite_mem_refs (loop, ref, tmp_var);
- /* Emit the load code at the position of the first reference location
- inside the loop, so that we are sure it will be processed by
- move_computations after all its dependencies. */
- gsi = gsi_for_stmt (first_mem_ref_loc (loop, ref)->stmt);
- /* FIXME/TODO: For the multi-threaded variant, we could avoid this
- load altogether, since the store is predicated by a flag. We
- could do the load only if it was originally in the loop. */
- load = gimple_build_assign (tmp_var, unshare_expr (ref->mem.ref));
- lim_data = init_lim_data (load);
- lim_data->max_loop = loop;
- lim_data->tgt_loop = loop;
- gsi_insert_before (&gsi, load, GSI_SAME_STMT);
- if (multi_threaded_model_p)
- {
- load = gimple_build_assign (store_flag, boolean_false_node);
- lim_data = init_lim_data (load);
- lim_data->max_loop = loop;
- lim_data->tgt_loop = loop;
- gsi_insert_before (&gsi, load, GSI_SAME_STMT);
- }
- /* Sink the store to every exit from the loop. */
- FOR_EACH_VEC_ELT (exits, i, ex)
- if (!multi_threaded_model_p)
- {
- gassign *store;
- store = gimple_build_assign (unshare_expr (ref->mem.ref), tmp_var);
- gsi_insert_on_edge (ex, store);
- }
- else
- execute_sm_if_changed (ex, ref->mem.ref, tmp_var, store_flag);
- }
- /* Hoists memory references MEM_REFS out of LOOP. EXITS is the list of exit
- edges of the LOOP. */
- static void
- hoist_memory_references (struct loop *loop, bitmap mem_refs,
- vec<edge> exits)
- {
- mem_ref_p ref;
- unsigned i;
- bitmap_iterator bi;
- EXECUTE_IF_SET_IN_BITMAP (mem_refs, 0, i, bi)
- {
- ref = memory_accesses.refs_list[i];
- execute_sm (loop, exits, ref);
- }
- }
- struct ref_always_accessed
- {
- ref_always_accessed (struct loop *loop_, bool stored_p_)
- : loop (loop_), stored_p (stored_p_) {}
- bool operator () (mem_ref_loc_p loc);
- struct loop *loop;
- bool stored_p;
- };
- bool
- ref_always_accessed::operator () (mem_ref_loc_p loc)
- {
- struct loop *must_exec;
- if (!get_lim_data (loc->stmt))
- return false;
- /* If we require an always-executed store, make sure the statement
- stores to the reference. */
- if (stored_p)
- {
- tree lhs = gimple_get_lhs (loc->stmt);
- if (!lhs
- || lhs != *loc->ref)
- return false;
- }
- must_exec = get_lim_data (loc->stmt)->always_executed_in;
- if (!must_exec)
- return false;
- if (must_exec == loop
- || flow_loop_nested_p (must_exec, loop))
- return true;
- return false;
- }
- /* Returns true if REF is always accessed in LOOP. If STORED_P is true,
- make sure REF is always stored to in LOOP. */
- static bool
- ref_always_accessed_p (struct loop *loop, mem_ref_p ref, bool stored_p)
- {
- return for_all_locs_in_loop (loop, ref,
- ref_always_accessed (loop, stored_p));
- }
- /* Returns true if REF1 and REF2 are independent. */
- static bool
- refs_independent_p (mem_ref_p ref1, mem_ref_p ref2)
- {
- if (ref1 == ref2)
- return true;
- if (dump_file && (dump_flags & TDF_DETAILS))
- fprintf (dump_file, "Querying dependency of refs %u and %u: ",
- ref1->id, ref2->id);
- if (mem_refs_may_alias_p (ref1, ref2, &memory_accesses.ttae_cache))
- {
- if (dump_file && (dump_flags & TDF_DETAILS))
- fprintf (dump_file, "dependent.\n");
- return false;
- }
- else
- {
- if (dump_file && (dump_flags & TDF_DETAILS))
- fprintf (dump_file, "independent.\n");
- return true;
- }
- }
- /* Mark REF dependent on stores or loads (according to STORED_P) in LOOP
- and its super-loops. */
- static void
- record_dep_loop (struct loop *loop, mem_ref_p ref, bool stored_p)
- {
- /* We can propagate dependent-in-loop bits up the loop
- hierarchy to all outer loops. */
- while (loop != current_loops->tree_root
- && bitmap_set_bit (&ref->dep_loop, LOOP_DEP_BIT (loop->num, stored_p)))
- loop = loop_outer (loop);
- }
- /* Returns true if REF is independent of all other memory references in
- LOOP. If STORED_P is true, REF is checked against all references in
- LOOP, otherwise only against the stores. */
- static bool
- ref_indep_loop_p_1 (struct loop *loop, mem_ref_p ref, bool stored_p)
- {
- bitmap refs_to_check;
- unsigned i;
- bitmap_iterator bi;
- mem_ref_p aref;
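- /* A stored reference must be independent of every reference in the
- loop, while a reference that is only loaded need only be independent
- of the stores. */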
- if (stored_p)
- refs_to_check = &memory_accesses.refs_in_loop[loop->num];
- else
- refs_to_check = &memory_accesses.refs_stored_in_loop[loop->num];
- if (bitmap_bit_p (refs_to_check, UNANALYZABLE_MEM_ID))
- return false;
- EXECUTE_IF_SET_IN_BITMAP (refs_to_check, 0, i, bi)
- {
- aref = memory_accesses.refs_list[i];
- if (!refs_independent_p (ref, aref))
- return false;
- }
- return true;
- }
- /* Returns true if REF is independent of all other memory references in
- LOOP. Wrapper over ref_indep_loop_p_1, caching its results. */
- static bool
- ref_indep_loop_p_2 (struct loop *loop, mem_ref_p ref, bool stored_p)
- {
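- /* If REF itself is stored in LOOP, the query degenerates to checking
- independence against all references in the loop. */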
- stored_p |= (ref->stored && bitmap_bit_p (ref->stored, loop->num));
- if (bitmap_bit_p (&ref->indep_loop, LOOP_DEP_BIT (loop->num, stored_p)))
- return true;
- if (bitmap_bit_p (&ref->dep_loop, LOOP_DEP_BIT (loop->num, stored_p)))
- return false;
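- /* REF is independent in LOOP only if it is also independent in all
- of LOOP's inner loops. */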
- struct loop *inner = loop->inner;
- while (inner)
- {
- if (!ref_indep_loop_p_2 (inner, ref, stored_p))
- return false;
- inner = inner->next;
- }
- bool indep_p = ref_indep_loop_p_1 (loop, ref, stored_p);
- if (dump_file && (dump_flags & TDF_DETAILS))
- fprintf (dump_file, "Querying dependencies of ref %u in loop %d: %s\n",
- ref->id, loop->num, indep_p ? "independent" : "dependent");
- /* Record the computed result in the cache. */
- if (indep_p)
- {
- if (bitmap_set_bit (&ref->indep_loop, LOOP_DEP_BIT (loop->num, stored_p))
- && stored_p)
- {
- /* If it's independent of all refs then it's independent of
- stores, too. */
- bitmap_set_bit (&ref->indep_loop, LOOP_DEP_BIT (loop->num, false));
- }
- }
- else
- {
- record_dep_loop (loop, ref, stored_p);
- if (!stored_p)
- {
- /* If it's dependent on stores it's dependent on
- all refs, too. */
- record_dep_loop (loop, ref, true);
- }
- }
- return indep_p;
- }
- /* Returns true if REF is independent of all other memory references in
- LOOP. */
- static bool
- ref_indep_loop_p (struct loop *loop, mem_ref_p ref)
- {
- gcc_checking_assert (MEM_ANALYZABLE (ref));
- return ref_indep_loop_p_2 (loop, ref, false);
- }
- /* Returns true if we can perform store motion of REF from LOOP. */
- static bool
- can_sm_ref_p (struct loop *loop, mem_ref_p ref)
- {
- tree base;
- /* Can't hoist unanalyzable refs. */
- if (!MEM_ANALYZABLE (ref))
- return false;
- /* It should be movable. */
- if (!is_gimple_reg_type (TREE_TYPE (ref->mem.ref))
- || TREE_THIS_VOLATILE (ref->mem.ref)
- || !for_each_index (&ref->mem.ref, may_move_till, loop))
- return false;
- /* If it can throw, fail; we do not properly update EH info. */
- if (tree_could_throw_p (ref->mem.ref))
- return false;
- /* If it can trap, it must be always executed in LOOP.
- Readonly memory locations may trap when storing to them, but
- tree_could_trap_p is a predicate for rvalues, so check that
- explicitly. */
- base = get_base_address (ref->mem.ref);
- if ((tree_could_trap_p (ref->mem.ref)
- || (DECL_P (base) && TREE_READONLY (base)))
- && !ref_always_accessed_p (loop, ref, true))
- return false;
- /* And it must be independent of all other memory references
- in LOOP. */
- if (!ref_indep_loop_p (loop, ref))
- return false;
- return true;
- }
- /* Marks in REFS_TO_SM the references in LOOP for which store motion
- should be performed. SM_EXECUTED is the set of references for which
- store motion was performed in one of the outer loops. */
- static void
- find_refs_for_sm (struct loop *loop, bitmap sm_executed, bitmap refs_to_sm)
- {
- bitmap refs = &memory_accesses.all_refs_stored_in_loop[loop->num];
- unsigned i;
- bitmap_iterator bi;
- mem_ref_p ref;
- EXECUTE_IF_AND_COMPL_IN_BITMAP (refs, sm_executed, 0, i, bi)
- {
- ref = memory_accesses.refs_list[i];
- if (can_sm_ref_p (loop, ref))
- bitmap_set_bit (refs_to_sm, i);
- }
- }
- /* Checks whether LOOP (with exits stored in EXITS array) is suitable
- for a store motion optimization (i.e. whether we can insert statements
- on its exits). */
- static bool
- loop_suitable_for_sm (struct loop *loop ATTRIBUTE_UNUSED,
- vec<edge> exits)
- {
- unsigned i;
- edge ex;
- FOR_EACH_VEC_ELT (exits, i, ex)
- if (ex->flags & (EDGE_ABNORMAL | EDGE_EH))
- return false;
- return true;
- }
- /* Try to perform store motion for all memory references modified inside
- LOOP. SM_EXECUTED is the bitmap of the memory references for which
- store motion was executed in one of the outer loops. */
- static void
- store_motion_loop (struct loop *loop, bitmap sm_executed)
- {
- vec<edge> exits = get_loop_exit_edges (loop);
- struct loop *subloop;
- bitmap sm_in_loop = BITMAP_ALLOC (&lim_bitmap_obstack);
- if (loop_suitable_for_sm (loop, exits))
- {
- find_refs_for_sm (loop, sm_executed, sm_in_loop);
- hoist_memory_references (loop, sm_in_loop, exits);
- }
- exits.release ();
- bitmap_ior_into (sm_executed, sm_in_loop);
- for (subloop = loop->inner; subloop != NULL; subloop = subloop->next)
- store_motion_loop (subloop, sm_executed);
- bitmap_and_compl_into (sm_executed, sm_in_loop);
- BITMAP_FREE (sm_in_loop);
- }
- /* Try to perform store motion for all memory references modified inside
- loops. */
- static void
- store_motion (void)
- {
- struct loop *loop;
- bitmap sm_executed = BITMAP_ALLOC (&lim_bitmap_obstack);
- for (loop = current_loops->tree_root->inner; loop != NULL; loop = loop->next)
- store_motion_loop (loop, sm_executed);
- BITMAP_FREE (sm_executed);
- gsi_commit_edge_inserts ();
- }
- /* Fills ALWAYS_EXECUTED_IN information for basic blocks of LOOP, i.e.
- for each such basic block bb records the outermost loop for which
- execution of its header implies execution of bb. CONTAINS_CALL is the
- bitmap of blocks that contain a nonpure call. */
- static void
- fill_always_executed_in_1 (struct loop *loop, sbitmap contains_call)
- {
- basic_block bb = NULL, *bbs, last = NULL;
- unsigned i;
- edge e;
- struct loop *inn_loop = loop;
- if (ALWAYS_EXECUTED_IN (loop->header) == NULL)
- {
- bbs = get_loop_body_in_dom_order (loop);
- for (i = 0; i < loop->num_nodes; i++)
- {
- edge_iterator ei;
- bb = bbs[i];
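- /* Blocks that dominate the latch are executed on every complete
- iteration; remember the last such block in dominator order. */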
- if (dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
- last = bb;
- if (bitmap_bit_p (contains_call, bb->index))
- break;
- FOR_EACH_EDGE (e, ei, bb->succs)
- if (!flow_bb_inside_loop_p (loop, e->dest))
- break;
- if (e)
- break;
- /* A loop might be infinite (TODO use simple loop analysis
- to disprove this if possible). */
- if (bb->flags & BB_IRREDUCIBLE_LOOP)
- break;
- if (!flow_bb_inside_loop_p (inn_loop, bb))
- break;
- if (bb->loop_father->header == bb)
- {
- if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
- break;
- /* In a loop that is always entered we may proceed anyway.
- But record that we entered it and stop once we leave it. */
- inn_loop = bb->loop_father;
- }
- }
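- /* Mark LAST and every block on the dominator chain from the loop
- header down to LAST as always executed in LOOP. */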
- while (1)
- {
- SET_ALWAYS_EXECUTED_IN (last, loop);
- if (last == loop->header)
- break;
- last = get_immediate_dominator (CDI_DOMINATORS, last);
- }
- free (bbs);
- }
- for (loop = loop->inner; loop; loop = loop->next)
- fill_always_executed_in_1 (loop, contains_call);
- }
- /* Fills ALWAYS_EXECUTED_IN information for basic blocks, i.e.
- for each such basic block bb records the outermost loop for which
- execution of its header implies execution of bb. */
- static void
- fill_always_executed_in (void)
- {
- sbitmap contains_call = sbitmap_alloc (last_basic_block_for_fn (cfun));
- basic_block bb;
- struct loop *loop;
- bitmap_clear (contains_call);
- FOR_EACH_BB_FN (bb, cfun)
- {
- gimple_stmt_iterator gsi;
- for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
- {
- if (nonpure_call_p (gsi_stmt (gsi)))
- break;
- }
- if (!gsi_end_p (gsi))
- bitmap_set_bit (contains_call, bb->index);
- }
- for (loop = current_loops->tree_root->inner; loop; loop = loop->next)
- fill_always_executed_in_1 (loop, contains_call);
- sbitmap_free (contains_call);
- }
- /* Compute the global information needed by the loop invariant motion pass. */
- static void
- tree_ssa_lim_initialize (void)
- {
- struct loop *loop;
- unsigned i;
- bitmap_obstack_initialize (&lim_bitmap_obstack);
- gcc_obstack_init (&mem_ref_obstack);
- lim_aux_data_map = new hash_map<gimple, lim_aux_data *>;
- if (flag_tm)
- compute_transaction_bits ();
- alloc_aux_for_edges (0);
- memory_accesses.refs = new hash_table<mem_ref_hasher> (100);
- memory_accesses.refs_list.create (100);
- /* Allocate a special, unanalyzable mem-ref with ID zero. */
- memory_accesses.refs_list.quick_push
- (mem_ref_alloc (error_mark_node, 0, UNANALYZABLE_MEM_ID));
- memory_accesses.refs_in_loop.create (number_of_loops (cfun));
- memory_accesses.refs_in_loop.quick_grow (number_of_loops (cfun));
- memory_accesses.refs_stored_in_loop.create (number_of_loops (cfun));
- memory_accesses.refs_stored_in_loop.quick_grow (number_of_loops (cfun));
- memory_accesses.all_refs_stored_in_loop.create (number_of_loops (cfun));
- memory_accesses.all_refs_stored_in_loop.quick_grow (number_of_loops (cfun));
- for (i = 0; i < number_of_loops (cfun); i++)
- {
- bitmap_initialize (&memory_accesses.refs_in_loop[i],
- &lim_bitmap_obstack);
- bitmap_initialize (&memory_accesses.refs_stored_in_loop[i],
- &lim_bitmap_obstack);
- bitmap_initialize (&memory_accesses.all_refs_stored_in_loop[i],
- &lim_bitmap_obstack);
- }
- memory_accesses.ttae_cache = NULL;
- /* Initialize bb_loop_postorder with a mapping from loop->num to
- its postorder index. */
- i = 0;
- bb_loop_postorder = XNEWVEC (unsigned, number_of_loops (cfun));
- FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
- bb_loop_postorder[loop->num] = i++;
- }
- /* Cleans up after the invariant motion pass. */
- static void
- tree_ssa_lim_finalize (void)
- {
- basic_block bb;
- unsigned i;
- mem_ref_p ref;
- free_aux_for_edges ();
- FOR_EACH_BB_FN (bb, cfun)
- SET_ALWAYS_EXECUTED_IN (bb, NULL);
- bitmap_obstack_release (&lim_bitmap_obstack);
- delete lim_aux_data_map;
- delete memory_accesses.refs;
- memory_accesses.refs = NULL;
- FOR_EACH_VEC_ELT (memory_accesses.refs_list, i, ref)
- memref_free (ref);
- memory_accesses.refs_list.release ();
- obstack_free (&mem_ref_obstack, NULL);
- memory_accesses.refs_in_loop.release ();
- memory_accesses.refs_stored_in_loop.release ();
- memory_accesses.all_refs_stored_in_loop.release ();
- if (memory_accesses.ttae_cache)
- free_affine_expand_cache (&memory_accesses.ttae_cache);
- free (bb_loop_postorder);
- }
- /* Moves invariants from loops. Only "expensive" invariants are moved out --
- i.e. those that are likely to be a win regardless of the register
- pressure. */
- unsigned int
- tree_ssa_lim (void)
- {
- unsigned int todo;
- tree_ssa_lim_initialize ();
- /* Gathers information about memory accesses in the loops. */
- analyze_memory_references ();
- /* Fills ALWAYS_EXECUTED_IN information for basic blocks. */
- fill_always_executed_in ();
- /* For each statement determine the outermost loop in which it is
- invariant and the cost of computing the invariant. */
- invariantness_dom_walker (CDI_DOMINATORS)
- .walk (cfun->cfg->x_entry_block_ptr);
- /* Execute store motion. Force the necessary invariants to be moved
- out of the loops as well. */
- store_motion ();
- /* Move the expressions that are expensive enough. */
- todo = move_computations ();
- tree_ssa_lim_finalize ();
- return todo;
- }
- /* Loop invariant motion pass. */
- namespace {
- const pass_data pass_data_lim =
- {
- GIMPLE_PASS, /* type */
- "lim", /* name */
- OPTGROUP_LOOP, /* optinfo_flags */
- TV_LIM, /* tv_id */
- PROP_cfg, /* properties_required */
- 0, /* properties_provided */
- 0, /* properties_destroyed */
- 0, /* todo_flags_start */
- 0, /* todo_flags_finish */
- };
- class pass_lim : public gimple_opt_pass
- {
- public:
- pass_lim (gcc::context *ctxt)
- : gimple_opt_pass (pass_data_lim, ctxt)
- {}
- /* opt_pass methods: */
- opt_pass * clone () { return new pass_lim (m_ctxt); }
- virtual bool gate (function *) { return flag_tree_loop_im != 0; }
- virtual unsigned int execute (function *);
- }; // class pass_lim
- unsigned int
- pass_lim::execute (function *fun)
- {
- if (number_of_loops (fun) <= 1)
- return 0;
- return tree_ssa_lim ();
- }
- } // anon namespace
- gimple_opt_pass *
- make_pass_lim (gcc::context *ctxt)
- {
- return new pass_lim (ctxt);
- }