/* Functions to determine/estimate number of iterations of a loop.
   Copyright (C) 2004-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
#include "double-int.h"
#include "input.h"
#include "alias.h"
#include "symtab.h"
#include "wide-int.h"
#include "inchash.h"
#include "tree.h"
#include "fold-const.h"
#include "calls.h"
#include "hashtab.h"
#include "hard-reg-set.h"
#include "function.h"
#include "rtl.h"
#include "flags.h"
#include "statistics.h"
#include "real.h"
#include "fixed-value.h"
#include "insn-config.h"
#include "expmed.h"
#include "dojump.h"
#include "explow.h"
#include "emit-rtl.h"
#include "varasm.h"
#include "stmt.h"
#include "expr.h"
#include "tm_p.h"
#include "predict.h"
#include "dominance.h"
#include "cfg.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "intl.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "tree-ssa-loop-ivopts.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "dumpfile.h"
#include "cfgloop.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "tree-data-ref.h"
#include "params.h"
#include "diagnostic-core.h"
#include "tree-inline.h"
#include "tree-pass.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "wide-int-print.h"
/* Exchange the two affine_iv pointers X and Y in place.  */
#define SWAP(X, Y) do { affine_iv *tmp = (X); (X) = (Y); (Y) = tmp; } while (0)

/* The maximum number of dominator BBs we search for conditions
   of loop header copies we use for simplifying a conditional
   expression.  */
#define MAX_DOMINATORS_TO_WALK 8

/*

   Analysis of number of iterations of an affine exit test.

*/
/* Bounds on some value, BELOW <= X <= UP.  Both endpoints are
   arbitrary-precision GMP integers; callers are responsible for
   mpz_init/mpz_clear of the members.  */

typedef struct
{
  mpz_t below, up;
} bounds;
  93. /* Splits expression EXPR to a variable part VAR and constant OFFSET. */
  94. static void
  95. split_to_var_and_offset (tree expr, tree *var, mpz_t offset)
  96. {
  97. tree type = TREE_TYPE (expr);
  98. tree op0, op1;
  99. bool negate = false;
  100. *var = expr;
  101. mpz_set_ui (offset, 0);
  102. switch (TREE_CODE (expr))
  103. {
  104. case MINUS_EXPR:
  105. negate = true;
  106. /* Fallthru. */
  107. case PLUS_EXPR:
  108. case POINTER_PLUS_EXPR:
  109. op0 = TREE_OPERAND (expr, 0);
  110. op1 = TREE_OPERAND (expr, 1);
  111. if (TREE_CODE (op1) != INTEGER_CST)
  112. break;
  113. *var = op0;
  114. /* Always sign extend the offset. */
  115. wi::to_mpz (op1, offset, SIGNED);
  116. if (negate)
  117. mpz_neg (offset, offset);
  118. break;
  119. case INTEGER_CST:
  120. *var = build_int_cst_type (type, 0);
  121. wi::to_mpz (expr, offset, TYPE_SIGN (type));
  122. break;
  123. default:
  124. break;
  125. }
  126. }
/* Stores estimate on the minimum/maximum value of the expression VAR + OFF
   in TYPE to MIN and MAX.  MIN and MAX must be initialized by the caller.
   LOOP is used to look at range information of PHI nodes in its header
   fed from the preheader edge.  */

static void
determine_value_range (struct loop *loop, tree type, tree var, mpz_t off,
		       mpz_t min, mpz_t max)
{
  wide_int minv, maxv;
  enum value_range_type rtype = VR_VARYING;

  /* If the expression is a constant, we know its value exactly.  */
  if (integer_zerop (var))
    {
      mpz_set (min, off);
      mpz_set (max, off);
      return;
    }

  /* Fall back to the full range of TYPE; refined below if possible.  */
  get_type_static_bounds (type, min, max);

  /* See if we have some range info from VRP.  */
  if (TREE_CODE (var) == SSA_NAME && INTEGRAL_TYPE_P (type))
    {
      edge e = loop_preheader_edge (loop);
      signop sgn = TYPE_SIGN (type);
      gphi_iterator gsi;

      /* Either for VAR itself...  */
      rtype = get_range_info (var, &minv, &maxv);
      /* Or for PHI results in loop->header where VAR is used as
	 PHI argument from the loop preheader edge.  */
      for (gsi = gsi_start_phis (loop->header); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gphi *phi = gsi.phi ();
	  wide_int minc, maxc;
	  if (PHI_ARG_DEF_FROM_EDGE (phi, e) == var
	      && (get_range_info (gimple_phi_result (phi), &minc, &maxc)
		  == VR_RANGE))
	    {
	      if (rtype != VR_RANGE)
		{
		  /* First usable range found; adopt it as-is.  */
		  rtype = VR_RANGE;
		  minv = minc;
		  maxv = maxc;
		}
	      else
		{
		  /* Intersect with the range gathered so far.  */
		  minv = wi::max (minv, minc, sgn);
		  maxv = wi::min (maxv, maxc, sgn);
		  /* If the PHI result range are inconsistent with
		     the VAR range, give up on looking at the PHI
		     results.  This can happen if VR_UNDEFINED is
		     involved.  */
		  if (wi::gt_p (minv, maxv, sgn))
		    {
		      rtype = get_range_info (var, &minv, &maxv);
		      break;
		    }
		}
	    }
	}
      if (rtype == VR_RANGE)
	{
	  mpz_t minm, maxm;
	  gcc_assert (wi::le_p (minv, maxv, sgn));
	  mpz_init (minm);
	  mpz_init (maxm);
	  wi::to_mpz (minv, minm, sgn);
	  wi::to_mpz (maxv, maxm, sgn);
	  mpz_add (minm, minm, off);
	  mpz_add (maxm, maxm, off);
	  /* If the computation may not wrap or off is zero, then this
	     is always fine.  If off is negative and minv + off isn't
	     smaller than type's minimum, or off is positive and
	     maxv + off isn't bigger than type's maximum, use the more
	     precise range too.  */
	  if (nowrap_type_p (type)
	      || mpz_sgn (off) == 0
	      || (mpz_sgn (off) < 0 && mpz_cmp (minm, min) >= 0)
	      || (mpz_sgn (off) > 0 && mpz_cmp (maxm, max) <= 0))
	    {
	      mpz_set (min, minm);
	      mpz_set (max, maxm);
	      mpz_clear (minm);
	      mpz_clear (maxm);
	      return;
	    }
	  mpz_clear (minm);
	  mpz_clear (maxm);
	}
    }

  /* If the computation may wrap, we know nothing about the value, except for
     the range of the type.  */
  if (!nowrap_type_p (type))
    return;

  /* Since the addition of OFF does not wrap, if OFF is positive, then we may
     add it to MIN, otherwise to MAX.  */
  if (mpz_sgn (off) < 0)
    mpz_add (max, max, off);
  else
    mpz_add (min, min, off);
}
  224. /* Stores the bounds on the difference of the values of the expressions
  225. (var + X) and (var + Y), computed in TYPE, to BNDS. */
  226. static void
  227. bound_difference_of_offsetted_base (tree type, mpz_t x, mpz_t y,
  228. bounds *bnds)
  229. {
  230. int rel = mpz_cmp (x, y);
  231. bool may_wrap = !nowrap_type_p (type);
  232. mpz_t m;
  233. /* If X == Y, then the expressions are always equal.
  234. If X > Y, there are the following possibilities:
  235. a) neither of var + X and var + Y overflow or underflow, or both of
  236. them do. Then their difference is X - Y.
  237. b) var + X overflows, and var + Y does not. Then the values of the
  238. expressions are var + X - M and var + Y, where M is the range of
  239. the type, and their difference is X - Y - M.
  240. c) var + Y underflows and var + X does not. Their difference again
  241. is M - X + Y.
  242. Therefore, if the arithmetics in type does not overflow, then the
  243. bounds are (X - Y, X - Y), otherwise they are (X - Y - M, X - Y)
  244. Similarly, if X < Y, the bounds are either (X - Y, X - Y) or
  245. (X - Y, X - Y + M). */
  246. if (rel == 0)
  247. {
  248. mpz_set_ui (bnds->below, 0);
  249. mpz_set_ui (bnds->up, 0);
  250. return;
  251. }
  252. mpz_init (m);
  253. wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), m, UNSIGNED);
  254. mpz_add_ui (m, m, 1);
  255. mpz_sub (bnds->up, x, y);
  256. mpz_set (bnds->below, bnds->up);
  257. if (may_wrap)
  258. {
  259. if (rel > 0)
  260. mpz_sub (bnds->below, bnds->below, m);
  261. else
  262. mpz_add (bnds->up, bnds->up, m);
  263. }
  264. mpz_clear (m);
  265. }
/* From condition C0 CMP C1 derives information regarding the
   difference of values of VARX + OFFX and VARY + OFFY, computed in TYPE,
   and stores it to BNDS.  BNDS may only be tightened, never widened.  */

static void
refine_bounds_using_guard (tree type, tree varx, mpz_t offx,
			   tree vary, mpz_t offy,
			   tree c0, enum tree_code cmp, tree c1,
			   bounds *bnds)
{
  tree varc0, varc1, tmp, ctype;
  mpz_t offc0, offc1, loffx, loffy, bnd;
  bool lbound = false;
  bool no_wrap = nowrap_type_p (type);
  bool x_ok, y_ok;

  switch (cmp)
    {
    case LT_EXPR:
    case LE_EXPR:
    case GT_EXPR:
    case GE_EXPR:
      STRIP_SIGN_NOPS (c0);
      STRIP_SIGN_NOPS (c1);
      ctype = TREE_TYPE (c0);
      if (!useless_type_conversion_p (ctype, type))
	return;

      break;

    case EQ_EXPR:
      /* We could derive quite precise information from EQ_EXPR, however, such
	 a guard is unlikely to appear, so we do not bother with handling
	 it.  */
      return;

    case NE_EXPR:
      /* NE_EXPR comparisons do not contain much of useful information, except for
	 special case of comparing with the bounds of the type.  */
      if (TREE_CODE (c1) != INTEGER_CST
	  || !INTEGRAL_TYPE_P (type))
	return;
      /* Ensure that the condition speaks about an expression in the same type
	 as X and Y.  */
      ctype = TREE_TYPE (c0);
      if (TYPE_PRECISION (ctype) != TYPE_PRECISION (type))
	return;
      c0 = fold_convert (type, c0);
      c1 = fold_convert (type, c1);

      /* C0 != TYPE_MIN_VALUE implies C0 > TYPE_MIN_VALUE, and similarly
	 C0 != TYPE_MAX_VALUE implies C0 < TYPE_MAX_VALUE.  */
      if (TYPE_MIN_VALUE (type)
	  && operand_equal_p (c1, TYPE_MIN_VALUE (type), 0))
	{
	  cmp = GT_EXPR;
	  break;
	}
      if (TYPE_MAX_VALUE (type)
	  && operand_equal_p (c1, TYPE_MAX_VALUE (type), 0))
	{
	  cmp = LT_EXPR;
	  break;
	}

      return;
    default:
      return;
    }

  mpz_init (offc0);
  mpz_init (offc1);
  split_to_var_and_offset (expand_simple_operations (c0), &varc0, offc0);
  split_to_var_and_offset (expand_simple_operations (c1), &varc1, offc1);

  /* We are only interested in comparisons of expressions based on VARX and
     VARY.  TODO -- we might also be able to derive some bounds from
     expressions containing just one of the variables.  */

  /* Canonicalize so that VARX appears on the left-hand side of the
     condition.  */
  if (operand_equal_p (varx, varc1, 0))
    {
      tmp = varc0; varc0 = varc1; varc1 = tmp;
      mpz_swap (offc0, offc1);
      cmp = swap_tree_comparison (cmp);
    }

  if (!operand_equal_p (varx, varc0, 0)
      || !operand_equal_p (vary, varc1, 0))
    goto end;

  mpz_init_set (loffx, offx);
  mpz_init_set (loffy, offy);

  /* Canonicalize GT/GE to LT/LE by swapping sides; remember via LBOUND
     that the derived bound then constrains BNDS->below rather than
     BNDS->up.  */
  if (cmp == GT_EXPR || cmp == GE_EXPR)
    {
      tmp = varx; varx = vary; vary = tmp;
      mpz_swap (offc0, offc1);
      mpz_swap (loffx, loffy);
      cmp = swap_tree_comparison (cmp);
      lbound = true;
    }

  /* If there is no overflow, the condition implies that

     (VARX + OFFX) cmp (VARY + OFFY) + (OFFX - OFFY + OFFC1 - OFFC0).

     The overflows and underflows may complicate things a bit; each
     overflow decreases the appropriate offset by M, and underflow
     increases it by M.  The above inequality would not necessarily be
     true if

     -- VARX + OFFX underflows and VARX + OFFC0 does not, or
	VARX + OFFC0 overflows, but VARX + OFFX does not.
	This may only happen if OFFX < OFFC0.
     -- VARY + OFFY overflows and VARY + OFFC1 does not, or
	VARY + OFFC1 underflows and VARY + OFFY does not.
	This may only happen if OFFY > OFFC1.  */

  if (no_wrap)
    {
      x_ok = true;
      y_ok = true;
    }
  else
    {
      x_ok = (integer_zerop (varx)
	      || mpz_cmp (loffx, offc0) >= 0);
      y_ok = (integer_zerop (vary)
	      || mpz_cmp (loffy, offc1) <= 0);
    }

  if (x_ok && y_ok)
    {
      /* BND = OFFX - OFFY + OFFC1 - OFFC0 bounds the difference
	 (VARX + OFFX) - (VARY + OFFY).  */
      mpz_init (bnd);
      mpz_sub (bnd, loffx, loffy);
      mpz_add (bnd, bnd, offc1);
      mpz_sub (bnd, bnd, offc0);

      /* A strict comparison tightens the bound by one.  */
      if (cmp == LT_EXPR)
	mpz_sub_ui (bnd, bnd, 1);

      if (lbound)
	{
	  mpz_neg (bnd, bnd);
	  if (mpz_cmp (bnds->below, bnd) < 0)
	    mpz_set (bnds->below, bnd);
	}
      else
	{
	  if (mpz_cmp (bnd, bnds->up) < 0)
	    mpz_set (bnds->up, bnd);
	}
      mpz_clear (bnd);
    }

  mpz_clear (loffx);
  mpz_clear (loffy);
end:
  mpz_clear (offc0);
  mpz_clear (offc1);
}
/* Stores the bounds on the value of the expression X - Y in LOOP to BNDS.
   The subtraction is considered to be performed in arbitrary precision,
   without overflows.

   We do not attempt to be too clever regarding the value ranges of X and
   Y; most of the time, they are just integers or ssa names offsetted by
   integer.  However, we try to use the information contained in the
   comparisons before the loop (usually created by loop header copying).

   BNDS->below and BNDS->up are initialized here; the caller must
   eventually mpz_clear them.  */

static void
bound_difference (struct loop *loop, tree x, tree y, bounds *bnds)
{
  tree type = TREE_TYPE (x);
  tree varx, vary;
  mpz_t offx, offy;
  mpz_t minx, maxx, miny, maxy;
  int cnt = 0;
  edge e;
  basic_block bb;
  tree c0, c1;
  gimple cond;
  enum tree_code cmp;

  /* Get rid of unnecessary casts, but preserve the value of
     the expressions.  */
  STRIP_SIGN_NOPS (x);
  STRIP_SIGN_NOPS (y);

  mpz_init (bnds->below);
  mpz_init (bnds->up);
  mpz_init (offx);
  mpz_init (offy);
  split_to_var_and_offset (x, &varx, offx);
  split_to_var_and_offset (y, &vary, offy);

  if (!integer_zerop (varx)
      && operand_equal_p (varx, vary, 0))
    {
      /* Special case VARX == VARY -- we just need to compare the
         offsets.  The matters are a bit more complicated in the
         case addition of offsets may wrap.  */
      bound_difference_of_offsetted_base (type, offx, offy, bnds);
    }
  else
    {
      /* Otherwise, use the value ranges to determine the initial
	 estimates on below and up.  */
      mpz_init (minx);
      mpz_init (maxx);
      mpz_init (miny);
      mpz_init (maxy);
      determine_value_range (loop, type, varx, offx, minx, maxx);
      determine_value_range (loop, type, vary, offy, miny, maxy);

      mpz_sub (bnds->below, minx, maxy);
      mpz_sub (bnds->up, maxx, miny);
      mpz_clear (minx);
      mpz_clear (maxx);
      mpz_clear (miny);
      mpz_clear (maxy);
    }

  /* If both X and Y are constants, we cannot get any more precise.  */
  if (integer_zerop (varx) && integer_zerop (vary))
    goto end;

  /* Now walk the dominators of the loop header and use the entry
     guards to refine the estimates.  The walk is capped at
     MAX_DOMINATORS_TO_WALK blocks to bound compile time.  */
  for (bb = loop->header;
       bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) && cnt < MAX_DOMINATORS_TO_WALK;
       bb = get_immediate_dominator (CDI_DOMINATORS, bb))
    {
      if (!single_pred_p (bb))
	continue;
      e = single_pred_edge (bb);

      /* Only edges taken out of a conditional carry guard information.  */
      if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
	continue;

      cond = last_stmt (e->src);
      c0 = gimple_cond_lhs (cond);
      cmp = gimple_cond_code (cond);
      c1 = gimple_cond_rhs (cond);

      /* On the false edge the guard holds with the comparison inverted.  */
      if (e->flags & EDGE_FALSE_VALUE)
	cmp = invert_tree_comparison (cmp, false);

      refine_bounds_using_guard (type, varx, offx, vary, offy,
				 c0, cmp, c1, bnds);
      ++cnt;
    }

end:
  mpz_clear (offx);
  mpz_clear (offy);
}
/* Update the bounds in BNDS that restrict the value of X to the bounds
   that restrict the value of X + DELTA.  X can be obtained as a
   difference of two values in TYPE.  */

static void
bounds_add (bounds *bnds, const widest_int &delta, tree type)
{
  mpz_t mdelta, max;

  /* DELTA may be negative, hence the SIGNED conversion.  */
  mpz_init (mdelta);
  wi::to_mpz (delta, mdelta, SIGNED);

  /* MAX = 2^prec - 1, the largest possible difference of two values
     of TYPE; the shifted bounds are clamped to [-MAX, MAX] below.  */
  mpz_init (max);
  wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), max, UNSIGNED);

  /* Shift both bounds by DELTA.  */
  mpz_add (bnds->up, bnds->up, mdelta);
  mpz_add (bnds->below, bnds->below, mdelta);

  /* Clamp the upper bound to MAX, then reuse MAX (negated in place) to
     clamp the lower bound to -MAX.  */
  if (mpz_cmp (bnds->up, max) > 0)
    mpz_set (bnds->up, max);
  mpz_neg (max, max);
  if (mpz_cmp (bnds->below, max) < 0)
    mpz_set (bnds->below, max);

  mpz_clear (mdelta);
  mpz_clear (max);
}
  507. /* Update the bounds in BNDS that restrict the value of X to the bounds
  508. that restrict the value of -X. */
  509. static void
  510. bounds_negate (bounds *bnds)
  511. {
  512. mpz_t tmp;
  513. mpz_init_set (tmp, bnds->up);
  514. mpz_neg (bnds->up, bnds->below);
  515. mpz_neg (bnds->below, tmp);
  516. mpz_clear (tmp);
  517. }
/* Returns inverse of X modulo 2^s, where MASK = 2^s-1.

   X must be odd (it is the step divided by its largest power-of-two
   factor, see the caller).  The loop computes
     x^(1 + 2 + 4 + ... + 2^(ctr-1)) = x^(2^ctr - 1),
   squaring X on each step; for odd X this power is the multiplicative
   inverse of X modulo 2^(ctr+1), because the multiplicative group
   modulo 2^(ctr+1) has exponent dividing 2^(ctr-1), so
   x^(2^ctr) == 1.  */

static tree
inverse (tree x, tree mask)
{
  tree type = TREE_TYPE (x);
  tree rslt;
  unsigned ctr = tree_floor_log2 (mask);

  if (TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT)
    {
      /* Fast path: the whole computation fits in a host-wide integer;
	 unsigned overflow wraps, which is exactly arithmetic mod 2^N.  */
      unsigned HOST_WIDE_INT ix;
      unsigned HOST_WIDE_INT imask;
      unsigned HOST_WIDE_INT irslt = 1;

      gcc_assert (cst_and_fits_in_hwi (x));
      gcc_assert (cst_and_fits_in_hwi (mask));

      ix = int_cst_value (x);
      imask = int_cst_value (mask);

      for (; ctr; ctr--)
	{
	  irslt *= ix;
	  ix *= ix;
	}
      irslt &= imask;

      rslt = build_int_cst_type (type, irslt);
    }
  else
    {
      /* Wide types: do the same square-and-multiply with tree constants.  */
      rslt = build_int_cst (type, 1);
      for (; ctr; ctr--)
	{
	  rslt = int_const_binop (MULT_EXPR, rslt, x);
	  x = int_const_binop (MULT_EXPR, x, x);
	}
      rslt = int_const_binop (BIT_AND_EXPR, rslt, mask);
    }

  return rslt;
}
/* Derives the upper bound BND on the number of executions of loop with exit
   condition S * i <> C.  If NO_OVERFLOW is true, then the control variable of
   the loop does not overflow.  EXIT_MUST_BE_TAKEN is true if we are guaranteed
   that the loop ends through this exit, i.e., the induction variable ever
   reaches the value of C.

   The value C is equal to final - base, where final and base are the final and
   initial value of the actual induction variable in the analysed loop.  BNDS
   bounds the value of this difference when computed in signed type with
   unbounded range, while the computation of C is performed in an unsigned
   type with the range matching the range of the type of the induction variable.
   In particular, BNDS.up contains an upper bound on C in the following cases:
   -- if the iv must reach its final value without overflow, i.e., if
      NO_OVERFLOW && EXIT_MUST_BE_TAKEN is true, or
   -- if final >= base, which we know to hold when BNDS.below >= 0.  */

static void
number_of_iterations_ne_max (mpz_t bnd, bool no_overflow, tree c, tree s,
			     bounds *bnds, bool exit_must_be_taken)
{
  widest_int max;
  mpz_t d;
  tree type = TREE_TYPE (c);
  /* Record whether BNDS.up bounds C before the code below possibly
     upgrades NO_OVERFLOW/EXIT_MUST_BE_TAKEN (see the comment in the
     first branch for why this must be computed in advance).  */
  bool bnds_u_valid = ((no_overflow && exit_must_be_taken)
		       || mpz_sgn (bnds->below) >= 0);

  if (integer_onep (s)
      || (TREE_CODE (c) == INTEGER_CST
	  && TREE_CODE (s) == INTEGER_CST
	  && wi::mod_trunc (c, s, TYPE_SIGN (type)) == 0)
      || (TYPE_OVERFLOW_UNDEFINED (type)
	  && multiple_of_p (type, c, s)))
    {
      /* If C is an exact multiple of S, then its value will be reached before
	 the induction variable overflows (unless the loop is exited in some
	 other way before).  Note that the actual induction variable in the
	 loop (which ranges from base to final instead of from 0 to C) may
	 overflow, in which case BNDS.up will not be giving a correct upper
	 bound on C; thus, BNDS_U_VALID had to be computed in advance.  */
      no_overflow = true;
      exit_must_be_taken = true;
    }

  /* If the induction variable can overflow, the number of iterations is at
     most the period of the control variable (or infinite, but in that case
     the whole # of iterations analysis will fail).  */
  if (!no_overflow)
    {
      /* The period of S * i modulo 2^prec is 2^(prec - ctz (S)) - that
	 many values are visited before the sequence repeats.  */
      max = wi::mask <widest_int> (TYPE_PRECISION (type) - wi::ctz (s), false);
      wi::to_mpz (max, bnd, UNSIGNED);
      return;
    }

  /* Now we know that the induction variable does not overflow, so the loop
     iterates at most (range of type / S) times.  Start with BND = 2^prec - 1
     (the full range), to be divided by S below.  */
  wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), bnd, UNSIGNED);

  /* If the induction variable is guaranteed to reach the value of C before
     overflow, ... */
  if (exit_must_be_taken)
    {
      /* ... then we can strengthen this to C / S, and possibly we can use
	 the upper bound on C given by BNDS.  */
      if (TREE_CODE (c) == INTEGER_CST)
	wi::to_mpz (c, bnd, UNSIGNED);
      else if (bnds_u_valid)
	mpz_set (bnd, bnds->up);
    }

  /* Divide the bound on C by S (floor) to get the iteration bound.  */
  mpz_init (d);
  wi::to_mpz (s, d, UNSIGNED);
  mpz_fdiv_q (bnd, bnd, d);
  mpz_clear (d);
}
/* Determines number of iterations of loop whose ending condition
   is IV <> FINAL.  TYPE is the type of the iv.  The number of
   iterations is stored to NITER.  EXIT_MUST_BE_TAKEN is true if
   we know that the exit must be taken eventually, i.e., that the IV
   ever reaches the value FINAL (we derived this earlier, and possibly set
   NITER->assumptions to make sure this is the case).  BNDS contains the
   bounds on the difference FINAL - IV->base.

   Returns true on success; NITER->niter then holds the symbolic
   iteration count and NITER->max an upper bound on it.  */

static bool
number_of_iterations_ne (tree type, affine_iv *iv, tree final,
			 struct tree_niter_desc *niter, bool exit_must_be_taken,
			 bounds *bnds)
{
  tree niter_type = unsigned_type_for (type);
  tree s, c, d, bits, assumption, tmp, bound;
  mpz_t max;

  niter->control = *iv;
  niter->bound = final;
  niter->cmp = NE_EXPR;

  /* Rearrange the terms so that we get inequality S * i <> C, with S
     positive.  Also cast everything to the unsigned type.  If IV does
     not overflow, BNDS bounds the value of C.  Also, this is the
     case if the computation |FINAL - IV->base| does not overflow, i.e.,
     if BNDS->below in the result is nonnegative.  */
  if (tree_int_cst_sign_bit (iv->step))
    {
      /* Negative step: use S = -step, C = base - final, and mirror
	 BNDS accordingly (BNDS is updated in place for the caller).  */
      s = fold_convert (niter_type,
			fold_build1 (NEGATE_EXPR, type, iv->step));
      c = fold_build2 (MINUS_EXPR, niter_type,
		       fold_convert (niter_type, iv->base),
		       fold_convert (niter_type, final));
      bounds_negate (bnds);
    }
  else
    {
      s = fold_convert (niter_type, iv->step);
      c = fold_build2 (MINUS_EXPR, niter_type,
		       fold_convert (niter_type, final),
		       fold_convert (niter_type, iv->base));
    }

  /* Compute the upper bound on the number of iterations.  */
  mpz_init (max);
  number_of_iterations_ne_max (max, iv->no_overflow, c, s, bnds,
			       exit_must_be_taken);
  niter->max = widest_int::from (wi::from_mpz (niter_type, max, false),
				 TYPE_SIGN (niter_type));
  mpz_clear (max);

  /* First the trivial cases -- when the step is 1.  */
  if (integer_onep (s))
    {
      niter->niter = c;
      return true;
    }

  /* Let nsd (step, size of mode) = d.  If d does not divide c, the loop
     is infinite.  Otherwise, the number of iterations is
     (inverse(s/d) * (c/d)) mod (size of mode/d).  */
  bits = num_ending_zeros (s);
  bound = build_low_bits_mask (niter_type,
			       (TYPE_PRECISION (niter_type)
				- tree_to_uhwi (bits)));

  /* d = 2^bits, the largest power of two dividing S.  */
  d = fold_binary_to_constant (LSHIFT_EXPR, niter_type,
			       build_int_cst (niter_type, 1), bits);
  s = fold_binary_to_constant (RSHIFT_EXPR, niter_type, s, bits);

  if (!exit_must_be_taken)
    {
      /* If we cannot assume that the exit is taken eventually, record the
	 assumptions for divisibility of c.  */
      assumption = fold_build2 (FLOOR_MOD_EXPR, niter_type, c, d);
      assumption = fold_build2 (EQ_EXPR, boolean_type_node,
				assumption, build_int_cst (niter_type, 0));
      if (!integer_nonzerop (assumption))
	niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
					  niter->assumptions, assumption);
    }

  /* niter = (c / d) * inverse (s / d) mod 2^(prec - bits).  S/d is odd,
     so its inverse modulo the reduced power of two exists.  */
  c = fold_build2 (EXACT_DIV_EXPR, niter_type, c, d);
  tmp = fold_build2 (MULT_EXPR, niter_type, c, inverse (s, bound));
  niter->niter = fold_build2 (BIT_AND_EXPR, niter_type, tmp, bound);
  return true;
}
/* Checks whether we can determine the final value of the control variable
   of the loop with ending condition IV0 < IV1 (computed in TYPE).
   DELTA is the difference IV1->base - IV0->base, STEP is the absolute value
   of the step.  The assumptions necessary to ensure that the computation
   of the final value does not overflow are recorded in NITER.  If we
   find the final value, we adjust DELTA and return TRUE.  Otherwise
   we return false.  BNDS bounds the value of IV1->base - IV0->base,
   and will be updated by the same amount as DELTA.  EXIT_MUST_BE_TAKEN is
   true if we know that the exit must be taken eventually.  */

static bool
number_of_iterations_lt_to_ne (tree type, affine_iv *iv0, affine_iv *iv1,
			       struct tree_niter_desc *niter,
			       tree *delta, tree step,
			       bool exit_must_be_taken, bounds *bnds)
{
  tree niter_type = TREE_TYPE (step);
  /* MOD will become the amount by which the final value must be shifted
     so that DELTA becomes an exact multiple of STEP.  */
  tree mod = fold_build2 (FLOOR_MOD_EXPR, niter_type, *delta, step);
  tree tmod;
  mpz_t mmod;
  tree assumption = boolean_true_node, bound, noloop;
  bool ret = false, fv_comp_no_overflow;
  tree type1 = type;
  /* Pointer arithmetic is done in sizetype.  */
  if (POINTER_TYPE_P (type))
    type1 = sizetype;

  /* We require MOD to be a compile-time constant; otherwise we cannot
     reason about the adjusted final value.  */
  if (TREE_CODE (mod) != INTEGER_CST)
    return false;
  if (integer_nonzerop (mod))
    mod = fold_build2 (MINUS_EXPR, niter_type, step, mod);
  tmod = fold_convert (type1, mod);

  /* MMOD = -MOD, used below to compare against BNDS->below.  */
  mpz_init (mmod);
  wi::to_mpz (mod, mmod, UNSIGNED);
  mpz_neg (mmod, mmod);

  /* If the induction variable does not overflow and the exit is taken,
     then the computation of the final value does not overflow.  This is
     also obviously the case if the new final value is equal to the
     current one.  Finally, we postulate this for pointer type variables,
     as the code cannot rely on the object to that the pointer points being
     placed at the end of the address space (and more pragmatically,
     TYPE_{MIN,MAX}_VALUE is not defined for pointers).  */
  if (integer_zerop (mod) || POINTER_TYPE_P (type))
    fv_comp_no_overflow = true;
  else if (!exit_must_be_taken)
    fv_comp_no_overflow = false;
  else
    fv_comp_no_overflow =
      (iv0->no_overflow && integer_nonzerop (iv0->step))
      || (iv1->no_overflow && integer_nonzerop (iv1->step));

  if (integer_nonzerop (iv0->step))
    {
      /* The final value of the iv is iv1->base + MOD, assuming that this
	 computation does not overflow, and that
	 iv0->base <= iv1->base + MOD.  */
      if (!fv_comp_no_overflow)
	{
	  /* Record the assumption iv1->base <= MAX - MOD, which ensures
	     iv1->base + MOD does not wrap.  */
	  bound = fold_build2 (MINUS_EXPR, type1,
			       TYPE_MAX_VALUE (type1), tmod);
	  assumption = fold_build2 (LE_EXPR, boolean_type_node,
				    iv1->base, bound);
	  if (integer_zerop (assumption))
	    goto end;
	}
      /* If -MOD < BNDS->below, the difference of bases is known to be at
	 least -MOD + 1, so the loop executes at least once.  */
      if (mpz_cmp (mmod, bnds->below) < 0)
	noloop = boolean_false_node;
      else if (POINTER_TYPE_P (type))
	noloop = fold_build2 (GT_EXPR, boolean_type_node,
			      iv0->base,
			      fold_build_pointer_plus (iv1->base, tmod));
      else
	noloop = fold_build2 (GT_EXPR, boolean_type_node,
			      iv0->base,
			      fold_build2 (PLUS_EXPR, type1,
					   iv1->base, tmod));
    }
  else
    {
      /* The final value of the iv is iv0->base - MOD, assuming that this
	 computation does not overflow, and that
	 iv0->base - MOD <= iv1->base.  */
      if (!fv_comp_no_overflow)
	{
	  /* Record the assumption iv0->base >= MIN + MOD, which ensures
	     iv0->base - MOD does not wrap.  */
	  bound = fold_build2 (PLUS_EXPR, type1,
			       TYPE_MIN_VALUE (type1), tmod);
	  assumption = fold_build2 (GE_EXPR, boolean_type_node,
				    iv0->base, bound);
	  if (integer_zerop (assumption))
	    goto end;
	}
      if (mpz_cmp (mmod, bnds->below) < 0)
	noloop = boolean_false_node;
      else if (POINTER_TYPE_P (type))
	noloop = fold_build2 (GT_EXPR, boolean_type_node,
			      fold_build_pointer_plus (iv0->base,
						       fold_build1 (NEGATE_EXPR,
								    type1, tmod)),
			      iv1->base);
      else
	noloop = fold_build2 (GT_EXPR, boolean_type_node,
			      fold_build2 (MINUS_EXPR, type1,
					   iv0->base, tmod),
			      iv1->base);
    }

  /* Accumulate the no-overflow assumption and the "loop may not roll"
     condition into NITER.  */
  if (!integer_nonzerop (assumption))
    niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
				      niter->assumptions,
				      assumption);
  if (!integer_zerop (noloop))
    niter->may_be_zero = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
				      niter->may_be_zero,
				      noloop);

  /* Shift DELTA (and BNDS, by the contract in the function comment) by
     MOD, so that the caller analyses condition S * i != DELTA instead.  */
  bounds_add (bnds, wi::to_widest (mod), type);
  *delta = fold_build2 (PLUS_EXPR, niter_type, *delta, mod);

  ret = true;
end:
  mpz_clear (mmod);
  return ret;
}
/* Add assertions to NITER that ensure that the control variable of the loop
   with ending condition IV0 < IV1 does not overflow.  Types of IV0 and IV1
   are TYPE.  Returns false if we can prove that there is an overflow, true
   otherwise.  STEP is the absolute value of the step.

   On success, also marks both ivs as non-overflowing (guarded by the
   recorded assumptions).  */

static bool
assert_no_overflow_lt (tree type, affine_iv *iv0, affine_iv *iv1,
		       struct tree_niter_desc *niter, tree step)
{
  tree bound, d, assumption, diff;
  tree niter_type = TREE_TYPE (step);

  if (integer_nonzerop (iv0->step))
    {
      /* for (i = iv0->base; i < iv1->base; i += iv0->step) */
      if (iv0->no_overflow)
	return true;

      /* If iv0->base is a constant, we can determine the last value before
	 overflow precisely; otherwise we conservatively assume
	 MAX - STEP + 1.  */
      if (TREE_CODE (iv0->base) == INTEGER_CST)
	{
	  /* DIFF = (MAX - base) mod STEP: the distance from the last value
	     the iv attains to MAX.  */
	  d = fold_build2 (MINUS_EXPR, niter_type,
			   fold_convert (niter_type, TYPE_MAX_VALUE (type)),
			   fold_convert (niter_type, iv0->base));
	  diff = fold_build2 (FLOOR_MOD_EXPR, niter_type, d, step);
	}
      else
	diff = fold_build2 (MINUS_EXPR, niter_type, step,
			    build_int_cst (niter_type, 1));

      /* No overflow as long as iv1->base <= MAX - DIFF.  */
      bound = fold_build2 (MINUS_EXPR, type,
			   TYPE_MAX_VALUE (type), fold_convert (type, diff));
      assumption = fold_build2 (LE_EXPR, boolean_type_node,
				iv1->base, bound);
    }
  else
    {
      /* for (i = iv1->base; i > iv0->base; i += iv1->step) */
      if (iv1->no_overflow)
	return true;

      if (TREE_CODE (iv1->base) == INTEGER_CST)
	{
	  /* Mirror image of the case above, against TYPE_MIN_VALUE.  */
	  d = fold_build2 (MINUS_EXPR, niter_type,
			   fold_convert (niter_type, iv1->base),
			   fold_convert (niter_type, TYPE_MIN_VALUE (type)));
	  diff = fold_build2 (FLOOR_MOD_EXPR, niter_type, d, step);
	}
      else
	diff = fold_build2 (MINUS_EXPR, niter_type, step,
			    build_int_cst (niter_type, 1));

      /* No overflow as long as iv0->base >= MIN + DIFF.  */
      bound = fold_build2 (PLUS_EXPR, type,
			   TYPE_MIN_VALUE (type), fold_convert (type, diff));
      assumption = fold_build2 (GE_EXPR, boolean_type_node,
				iv0->base, bound);
    }

  /* A statically false assumption means the iv provably overflows.  */
  if (integer_zerop (assumption))
    return false;
  if (!integer_nonzerop (assumption))
    niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
				      niter->assumptions, assumption);

  iv0->no_overflow = true;
  iv1->no_overflow = true;
  return true;
}
/* Add an assumption to NITER that a loop whose ending condition
   is IV0 < IV1 rolls.  TYPE is the type of the control iv.  BNDS
   bounds the value of IV1->base - IV0->base.  */

static void
assert_loop_rolls_lt (tree type, affine_iv *iv0, affine_iv *iv1,
		      struct tree_niter_desc *niter, bounds *bnds)
{
  tree assumption = boolean_true_node, bound, diff;
  tree mbz, mbzl, mbzr, type1;
  bool rolls_p, no_overflow_p;
  widest_int dstep;
  mpz_t mstep, max;

  /* We are going to compute the number of iterations as
     (iv1->base - iv0->base + step - 1) / step, computed in the unsigned
     variant of TYPE.  This formula only works if

     -step + 1 <= (iv1->base - iv0->base) <= MAX - step + 1

     (where MAX is the maximum value of the unsigned variant of TYPE, and
     the computations in this formula are performed in full precision,
     i.e., without overflows).

     Usually, for loops with exit condition iv0->base + step * i < iv1->base,
     we have a condition of the form iv0->base - step < iv1->base before the loop,
     and for loops iv0->base < iv1->base - step * i the condition
     iv0->base < iv1->base + step, due to loop header copying, which enable us
     to prove the lower bound.

     The upper bound is more complicated.  Unless the expressions for initial
     and final value themselves contain enough information, we usually cannot
     derive it from the context.  */

  /* First check whether the answer does not follow from the bounds we gathered
     before.  */
  if (integer_nonzerop (iv0->step))
    dstep = wi::to_widest (iv0->step);
  else
    {
      /* IV1 steps downwards; its (negative) step, sign-extended to full
	 precision and negated, is the absolute step.  */
      dstep = wi::sext (wi::to_widest (iv1->step), TYPE_PRECISION (type));
      dstep = -dstep;
    }

  /* MSTEP = -step + 1; the lower-bound side of the condition above.  */
  mpz_init (mstep);
  wi::to_mpz (dstep, mstep, UNSIGNED);
  mpz_neg (mstep, mstep);
  mpz_add_ui (mstep, mstep, 1);

  rolls_p = mpz_cmp (mstep, bnds->below) <= 0;

  /* MAX + (-step + 1) is the upper-bound side of the condition.  */
  mpz_init (max);
  wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), max, UNSIGNED);
  mpz_add (max, max, mstep);
  no_overflow_p = (mpz_cmp (bnds->up, max) <= 0
		   /* For pointers, only values lying inside a single object
		      can be compared or manipulated by pointer arithmetics.
		      Gcc in general does not allow or handle objects larger
		      than half of the address space, hence the upper bound
		      is satisfied for pointers.  */
		   || POINTER_TYPE_P (type));
  mpz_clear (mstep);
  mpz_clear (max);

  if (rolls_p && no_overflow_p)
    return;

  type1 = type;
  if (POINTER_TYPE_P (type))
    type1 = sizetype;

  /* Now the hard part; we must formulate the assumption(s) as expressions, and
     we must be careful not to introduce overflow.  */

  if (integer_nonzerop (iv0->step))
    {
      diff = fold_build2 (MINUS_EXPR, type1,
			  iv0->step, build_int_cst (type1, 1));

      /* We need to know that iv0->base >= MIN + iv0->step - 1.  Since
	 0 address never belongs to any object, we can assume this for
	 pointers.  */
      if (!POINTER_TYPE_P (type))
	{
	  bound = fold_build2 (PLUS_EXPR, type1,
			       TYPE_MIN_VALUE (type), diff);
	  assumption = fold_build2 (GE_EXPR, boolean_type_node,
				    iv0->base, bound);
	}

      /* And then we can compute iv0->base - diff, and compare it with
	 iv1->base.  */
      mbzl = fold_build2 (MINUS_EXPR, type1,
			  fold_convert (type1, iv0->base), diff);
      mbzr = fold_convert (type1, iv1->base);
    }
  else
    {
      /* IV1->step is negative, so iv1->step + 1 is -(|step| - 1);
	 mirror image of the branch above.  */
      diff = fold_build2 (PLUS_EXPR, type1,
			  iv1->step, build_int_cst (type1, 1));

      if (!POINTER_TYPE_P (type))
	{
	  bound = fold_build2 (PLUS_EXPR, type1,
			       TYPE_MAX_VALUE (type), diff);
	  assumption = fold_build2 (LE_EXPR, boolean_type_node,
				    iv1->base, bound);
	}

      mbzl = fold_convert (type1, iv0->base);
      mbzr = fold_build2 (MINUS_EXPR, type1,
			  fold_convert (type1, iv1->base), diff);
    }

  if (!integer_nonzerop (assumption))
    niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
				      niter->assumptions, assumption);
  if (!rolls_p)
    {
      /* The loop does not roll when mbzl > mbzr.  */
      mbz = fold_build2 (GT_EXPR, boolean_type_node, mbzl, mbzr);
      niter->may_be_zero = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
					niter->may_be_zero, mbz);
    }
}
/* Determines number of iterations of loop whose ending condition
   is IV0 < IV1.  TYPE is the type of the iv.  The number of
   iterations is stored to NITER.  BNDS bounds the difference
   IV1->base - IV0->base.  EXIT_MUST_BE_TAKEN is true if we know
   that the exit must be taken eventually.

   Exactly one of IV0 and IV1 has a nonzero step (the caller guarantees
   this); that one is the control iv.  */

static bool
number_of_iterations_lt (tree type, affine_iv *iv0, affine_iv *iv1,
			 struct tree_niter_desc *niter,
			 bool exit_must_be_taken, bounds *bnds)
{
  tree niter_type = unsigned_type_for (type);
  tree delta, step, s;
  mpz_t mstep, tmp;

  if (integer_nonzerop (iv0->step))
    {
      niter->control = *iv0;
      niter->cmp = LT_EXPR;
      niter->bound = iv1->base;
    }
  else
    {
      niter->control = *iv1;
      niter->cmp = GT_EXPR;
      niter->bound = iv0->base;
    }

  /* DELTA = iv1->base - iv0->base, in the unsigned variant of TYPE.  */
  delta = fold_build2 (MINUS_EXPR, niter_type,
		       fold_convert (niter_type, iv1->base),
		       fold_convert (niter_type, iv0->base));

  /* First handle the special case that the step is +-1.  */
  if ((integer_onep (iv0->step) && integer_zerop (iv1->step))
      || (integer_all_onesp (iv1->step) && integer_zerop (iv0->step)))
    {
      /* for (i = iv0->base; i < iv1->base; i++)

	 or

	 for (i = iv1->base; i > iv0->base; i--).

	 In both cases # of iterations is iv1->base - iv0->base, assuming that
	 iv1->base >= iv0->base.

	 First try to derive a lower bound on the value of
	 iv1->base - iv0->base, computed in full precision.  If the difference
	 is nonnegative, we are done, otherwise we must record the
	 condition.  */
      if (mpz_sgn (bnds->below) < 0)
	niter->may_be_zero = fold_build2 (LT_EXPR, boolean_type_node,
					  iv1->base, iv0->base);
      niter->niter = delta;
      niter->max = widest_int::from (wi::from_mpz (niter_type, bnds->up, false),
				     TYPE_SIGN (niter_type));
      return true;
    }

  /* STEP is the absolute value of whichever iv moves.  */
  if (integer_nonzerop (iv0->step))
    step = fold_convert (niter_type, iv0->step);
  else
    step = fold_convert (niter_type,
			 fold_build1 (NEGATE_EXPR, type, iv1->step));

  /* If we can determine the final value of the control iv exactly, we can
     transform the condition to != comparison.  In particular, this will be
     the case if DELTA is constant.  */
  if (number_of_iterations_lt_to_ne (type, iv0, iv1, niter, &delta, step,
				     exit_must_be_taken, bnds))
    {
      affine_iv zps;

      /* Model the adjusted condition as 0 + step * i != DELTA.  */
      zps.base = build_int_cst (niter_type, 0);
      zps.step = step;
      /* number_of_iterations_lt_to_ne will add assumptions that ensure that
	 zps does not overflow.  */
      zps.no_overflow = true;

      return number_of_iterations_ne (type, &zps, delta, niter, true, bnds);
    }

  /* Make sure that the control iv does not overflow.  */
  if (!assert_no_overflow_lt (type, iv0, iv1, niter, step))
    return false;

  /* We determine the number of iterations as (delta + step - 1) / step.  For
     this to work, we must know that iv1->base >= iv0->base - step + 1,
     otherwise the loop does not roll.  */
  assert_loop_rolls_lt (type, iv0, iv1, niter, bnds);

  s = fold_build2 (MINUS_EXPR, niter_type,
		   step, build_int_cst (niter_type, 1));
  delta = fold_build2 (PLUS_EXPR, niter_type, delta, s);
  niter->niter = fold_build2 (FLOOR_DIV_EXPR, niter_type, delta, step);

  /* Compute the upper bound as (bnds->up + step - 1) / step, in exact
     (GMP) arithmetic.  */
  mpz_init (mstep);
  mpz_init (tmp);
  wi::to_mpz (step, mstep, UNSIGNED);
  mpz_add (tmp, bnds->up, mstep);
  mpz_sub_ui (tmp, tmp, 1);
  mpz_fdiv_q (tmp, tmp, mstep);
  niter->max = widest_int::from (wi::from_mpz (niter_type, tmp, false),
				 TYPE_SIGN (niter_type));
  mpz_clear (mstep);
  mpz_clear (tmp);

  return true;
}
/* Determines number of iterations of loop whose ending condition
   is IV0 <= IV1.  TYPE is the type of the iv.  The number of
   iterations is stored to NITER.  EXIT_MUST_BE_TAKEN is true if
   we know that this condition must eventually become false (we derived this
   earlier, and possibly set NITER->assumptions to make sure this
   is the case).  BNDS bounds the difference IV1->base - IV0->base.

   Reduces the problem to IV0 < IV1 by widening the interval by one
   (adjusting whichever iv is the non-moving bound), then delegates to
   number_of_iterations_lt.  */

static bool
number_of_iterations_le (tree type, affine_iv *iv0, affine_iv *iv1,
			 struct tree_niter_desc *niter, bool exit_must_be_taken,
			 bounds *bnds)
{
  tree assumption;
  tree type1 = type;
  if (POINTER_TYPE_P (type))
    type1 = sizetype;

  /* Say that IV0 is the control variable.  Then IV0 <= IV1 iff
     IV0 < IV1 + 1, assuming that IV1 is not equal to the greatest
     value of the type.  This we must know anyway, since if it is
     equal to this value, the loop rolls forever.  We do not check
     this condition for pointer type ivs, as the code cannot rely on
     the object to that the pointer points being placed at the end of
     the address space (and more pragmatically, TYPE_{MIN,MAX}_VALUE is
     not defined for pointers).  */

  if (!exit_must_be_taken && !POINTER_TYPE_P (type))
    {
      if (integer_nonzerop (iv0->step))
	assumption = fold_build2 (NE_EXPR, boolean_type_node,
				  iv1->base, TYPE_MAX_VALUE (type));
      else
	assumption = fold_build2 (NE_EXPR, boolean_type_node,
				  iv0->base, TYPE_MIN_VALUE (type));

      if (integer_zerop (assumption))
	return false;
      if (!integer_nonzerop (assumption))
	niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
					  niter->assumptions, assumption);
    }

  /* Widen the bound that does not move: IV1 + 1 when IV0 steps up,
     IV0 - 1 when IV1 steps down.  Pointer ivs use pointer-plus.  */
  if (integer_nonzerop (iv0->step))
    {
      if (POINTER_TYPE_P (type))
	iv1->base = fold_build_pointer_plus_hwi (iv1->base, 1);
      else
	iv1->base = fold_build2 (PLUS_EXPR, type1, iv1->base,
				 build_int_cst (type1, 1));
    }
  else if (POINTER_TYPE_P (type))
    iv0->base = fold_build_pointer_plus_hwi (iv0->base, -1);
  else
    iv0->base = fold_build2 (MINUS_EXPR, type1,
			     iv0->base, build_int_cst (type1, 1));

  /* The difference of bases grew by one as well.  */
  bounds_add (bnds, 1, type1);

  return number_of_iterations_lt (type, iv0, iv1, niter, exit_must_be_taken,
				  bnds);
}
  1126. /* Dumps description of affine induction variable IV to FILE. */
  1127. static void
  1128. dump_affine_iv (FILE *file, affine_iv *iv)
  1129. {
  1130. if (!integer_zerop (iv->step))
  1131. fprintf (file, "[");
  1132. print_generic_expr (dump_file, iv->base, TDF_SLIM);
  1133. if (!integer_zerop (iv->step))
  1134. {
  1135. fprintf (file, ", + , ");
  1136. print_generic_expr (dump_file, iv->step, TDF_SLIM);
  1137. fprintf (file, "]%s", iv->no_overflow ? "(no_overflow)" : "");
  1138. }
  1139. }
  1140. /* Determine the number of iterations according to condition (for staying
  1141. inside loop) which compares two induction variables using comparison
  1142. operator CODE. The induction variable on left side of the comparison
  1143. is IV0, the right-hand side is IV1. Both induction variables must have
  1144. type TYPE, which must be an integer or pointer type. The steps of the
  1145. ivs must be constants (or NULL_TREE, which is interpreted as constant zero).
  1146. LOOP is the loop whose number of iterations we are determining.
  1147. ONLY_EXIT is true if we are sure this is the only way the loop could be
  1148. exited (including possibly non-returning function calls, exceptions, etc.)
  1149. -- in this case we can use the information whether the control induction
  1150. variables can overflow or not in a more efficient way.
  1151. if EVERY_ITERATION is true, we know the test is executed on every iteration.
  1152. The results (number of iterations and assumptions as described in
  1153. comments at struct tree_niter_desc in tree-ssa-loop.h) are stored to NITER.
  1154. Returns false if it fails to determine number of iterations, true if it
  1155. was determined (possibly with some assumptions). */
  1156. static bool
  1157. number_of_iterations_cond (struct loop *loop,
  1158. tree type, affine_iv *iv0, enum tree_code code,
  1159. affine_iv *iv1, struct tree_niter_desc *niter,
  1160. bool only_exit, bool every_iteration)
  1161. {
  1162. bool exit_must_be_taken = false, ret;
  1163. bounds bnds;
  1164. /* If the test is not executed every iteration, wrapping may make the test
  1165. to pass again.
  1166. TODO: the overflow case can be still used as unreliable estimate of upper
  1167. bound. But we have no API to pass it down to number of iterations code
  1168. and, at present, it will not use it anyway. */
  1169. if (!every_iteration
  1170. && (!iv0->no_overflow || !iv1->no_overflow
  1171. || code == NE_EXPR || code == EQ_EXPR))
  1172. return false;
  1173. /* The meaning of these assumptions is this:
  1174. if !assumptions
  1175. then the rest of information does not have to be valid
  1176. if may_be_zero then the loop does not roll, even if
  1177. niter != 0. */
  1178. niter->assumptions = boolean_true_node;
  1179. niter->may_be_zero = boolean_false_node;
  1180. niter->niter = NULL_TREE;
  1181. niter->max = 0;
  1182. niter->bound = NULL_TREE;
  1183. niter->cmp = ERROR_MARK;
  1184. /* Make < comparison from > ones, and for NE_EXPR comparisons, ensure that
  1185. the control variable is on lhs. */
  1186. if (code == GE_EXPR || code == GT_EXPR
  1187. || (code == NE_EXPR && integer_zerop (iv0->step)))
  1188. {
  1189. SWAP (iv0, iv1);
  1190. code = swap_tree_comparison (code);
  1191. }
  1192. if (POINTER_TYPE_P (type))
  1193. {
  1194. /* Comparison of pointers is undefined unless both iv0 and iv1 point
  1195. to the same object. If they do, the control variable cannot wrap
  1196. (as wrap around the bounds of memory will never return a pointer
  1197. that would be guaranteed to point to the same object, even if we
  1198. avoid undefined behavior by casting to size_t and back). */
  1199. iv0->no_overflow = true;
  1200. iv1->no_overflow = true;
  1201. }
  1202. /* If the control induction variable does not overflow and the only exit
  1203. from the loop is the one that we analyze, we know it must be taken
  1204. eventually. */
  1205. if (only_exit)
  1206. {
  1207. if (!integer_zerop (iv0->step) && iv0->no_overflow)
  1208. exit_must_be_taken = true;
  1209. else if (!integer_zerop (iv1->step) && iv1->no_overflow)
  1210. exit_must_be_taken = true;
  1211. }
  1212. /* We can handle the case when neither of the sides of the comparison is
  1213. invariant, provided that the test is NE_EXPR. This rarely occurs in
  1214. practice, but it is simple enough to manage. */
  1215. if (!integer_zerop (iv0->step) && !integer_zerop (iv1->step))
  1216. {
  1217. tree step_type = POINTER_TYPE_P (type) ? sizetype : type;
  1218. if (code != NE_EXPR)
  1219. return false;
  1220. iv0->step = fold_binary_to_constant (MINUS_EXPR, step_type,
  1221. iv0->step, iv1->step);
  1222. iv0->no_overflow = false;
  1223. iv1->step = build_int_cst (step_type, 0);
  1224. iv1->no_overflow = true;
  1225. }
  1226. /* If the result of the comparison is a constant, the loop is weird. More
  1227. precise handling would be possible, but the situation is not common enough
  1228. to waste time on it. */
  1229. if (integer_zerop (iv0->step) && integer_zerop (iv1->step))
  1230. return false;
  1231. /* Ignore loops of while (i-- < 10) type. */
  1232. if (code != NE_EXPR)
  1233. {
  1234. if (iv0->step && tree_int_cst_sign_bit (iv0->step))
  1235. return false;
  1236. if (!integer_zerop (iv1->step) && !tree_int_cst_sign_bit (iv1->step))
  1237. return false;
  1238. }
  1239. /* If the loop exits immediately, there is nothing to do. */
  1240. tree tem = fold_binary (code, boolean_type_node, iv0->base, iv1->base);
  1241. if (tem && integer_zerop (tem))
  1242. {
  1243. niter->niter = build_int_cst (unsigned_type_for (type), 0);
  1244. niter->max = 0;
  1245. return true;
  1246. }
  1247. /* OK, now we know we have a senseful loop. Handle several cases, depending
  1248. on what comparison operator is used. */
  1249. bound_difference (loop, iv1->base, iv0->base, &bnds);
  1250. if (dump_file && (dump_flags & TDF_DETAILS))
  1251. {
  1252. fprintf (dump_file,
  1253. "Analyzing # of iterations of loop %d\n", loop->num);
  1254. fprintf (dump_file, " exit condition ");
  1255. dump_affine_iv (dump_file, iv0);
  1256. fprintf (dump_file, " %s ",
  1257. code == NE_EXPR ? "!="
  1258. : code == LT_EXPR ? "<"
  1259. : "<=");
  1260. dump_affine_iv (dump_file, iv1);
  1261. fprintf (dump_file, "\n");
  1262. fprintf (dump_file, " bounds on difference of bases: ");
  1263. mpz_out_str (dump_file, 10, bnds.below);
  1264. fprintf (dump_file, " ... ");
  1265. mpz_out_str (dump_file, 10, bnds.up);
  1266. fprintf (dump_file, "\n");
  1267. }
  1268. switch (code)
  1269. {
  1270. case NE_EXPR:
  1271. gcc_assert (integer_zerop (iv1->step));
  1272. ret = number_of_iterations_ne (type, iv0, iv1->base, niter,
  1273. exit_must_be_taken, &bnds);
  1274. break;
  1275. case LT_EXPR:
  1276. ret = number_of_iterations_lt (type, iv0, iv1, niter, exit_must_be_taken,
  1277. &bnds);
  1278. break;
  1279. case LE_EXPR:
  1280. ret = number_of_iterations_le (type, iv0, iv1, niter, exit_must_be_taken,
  1281. &bnds);
  1282. break;
  1283. default:
  1284. gcc_unreachable ();
  1285. }
  1286. mpz_clear (bnds.up);
  1287. mpz_clear (bnds.below);
  1288. if (dump_file && (dump_flags & TDF_DETAILS))
  1289. {
  1290. if (ret)
  1291. {
  1292. fprintf (dump_file, " result:\n");
  1293. if (!integer_nonzerop (niter->assumptions))
  1294. {
  1295. fprintf (dump_file, " under assumptions ");
  1296. print_generic_expr (dump_file, niter->assumptions, TDF_SLIM);
  1297. fprintf (dump_file, "\n");
  1298. }
  1299. if (!integer_zerop (niter->may_be_zero))
  1300. {
  1301. fprintf (dump_file, " zero if ");
  1302. print_generic_expr (dump_file, niter->may_be_zero, TDF_SLIM);
  1303. fprintf (dump_file, "\n");
  1304. }
  1305. fprintf (dump_file, " # of iterations ");
  1306. print_generic_expr (dump_file, niter->niter, TDF_SLIM);
  1307. fprintf (dump_file, ", bounded by ");
  1308. print_decu (niter->max, dump_file);
  1309. fprintf (dump_file, "\n");
  1310. }
  1311. else
  1312. fprintf (dump_file, " failed\n\n");
  1313. }
  1314. return ret;
  1315. }
  1316. /* Substitute NEW for OLD in EXPR and fold the result. */
  1317. static tree
  1318. simplify_replace_tree (tree expr, tree old, tree new_tree)
  1319. {
  1320. unsigned i, n;
  1321. tree ret = NULL_TREE, e, se;
  1322. if (!expr)
  1323. return NULL_TREE;
  1324. /* Do not bother to replace constants. */
  1325. if (CONSTANT_CLASS_P (old))
  1326. return expr;
  1327. if (expr == old
  1328. || operand_equal_p (expr, old, 0))
  1329. return unshare_expr (new_tree);
  1330. if (!EXPR_P (expr))
  1331. return expr;
  1332. n = TREE_OPERAND_LENGTH (expr);
  1333. for (i = 0; i < n; i++)
  1334. {
  1335. e = TREE_OPERAND (expr, i);
  1336. se = simplify_replace_tree (e, old, new_tree);
  1337. if (e == se)
  1338. continue;
  1339. if (!ret)
  1340. ret = copy_node (expr);
  1341. TREE_OPERAND (ret, i) = se;
  1342. }
  1343. return (ret ? fold (ret) : expr);
  1344. }
/* Expand definitions of ssa names in EXPR as long as they are simple
   enough, and return the new expression.  If STOP is specified, stop
   expanding if EXPR equals to it.  */

tree
expand_simple_operations (tree expr, tree stop)
{
  unsigned i, n;
  tree ret = NULL_TREE, e, ee, e1;
  enum tree_code code;
  gimple stmt;

  if (expr == NULL_TREE)
    return expr;

  /* Invariants cannot be expanded any further.  */
  if (is_gimple_min_invariant (expr))
    return expr;

  code = TREE_CODE (expr);
  if (IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code)))
    {
      /* EXPR is itself a GENERIC expression: expand each operand
	 recursively, copying EXPR lazily only if an operand changed.  */
      n = TREE_OPERAND_LENGTH (expr);
      for (i = 0; i < n; i++)
	{
	  e = TREE_OPERAND (expr, i);
	  ee = expand_simple_operations (e, stop);
	  if (e == ee)
	    continue;

	  if (!ret)
	    ret = copy_node (expr);

	  TREE_OPERAND (ret, i) = ee;
	}

      if (!ret)
	return expr;

      /* Folding the rebuilt expression may emit overflow warnings that
	 are not relevant to the user's code; suppress them.  */
      fold_defer_overflow_warnings ();
      ret = fold (ret);
      fold_undefer_and_ignore_overflow_warnings ();
      return ret;
    }

  /* Stop if it's not ssa name or the one we don't want to expand.  */
  if (TREE_CODE (expr) != SSA_NAME || expr == stop)
    return expr;

  stmt = SSA_NAME_DEF_STMT (expr);
  if (gimple_code (stmt) == GIMPLE_PHI)
    {
      basic_block src, dest;

      /* Only look through degenerate (single-argument) PHI nodes.  */
      if (gimple_phi_num_args (stmt) != 1)
	return expr;
      e = PHI_ARG_DEF (stmt, 0);

      /* Avoid propagating through loop exit phi nodes, which
	 could break loop-closed SSA form restrictions.  */
      dest = gimple_bb (stmt);
      src = single_pred (dest);
      if (TREE_CODE (e) == SSA_NAME
	  && src->loop_father != dest->loop_father)
	return expr;

      return expand_simple_operations (e, stop);
    }
  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return expr;

  /* Avoid expanding to expressions that contain SSA names that need
     to take part in abnormal coalescing.  */
  ssa_op_iter iter;
  FOR_EACH_SSA_TREE_OPERAND (e, stmt, iter, SSA_OP_USE)
    if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (e))
      return expr;

  e = gimple_assign_rhs1 (stmt);
  code = gimple_assign_rhs_code (stmt);
  if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS)
    {
      if (is_gimple_min_invariant (e))
	return e;

      /* A plain SSA-name copy: keep following the chain.  */
      if (code == SSA_NAME)
	return expand_simple_operations (e, stop);

      return expr;
    }

  switch (code)
    {
    CASE_CONVERT:
      /* Casts are simple.  */
      ee = expand_simple_operations (e, stop);
      return fold_build1 (code, TREE_TYPE (expr), ee);

    case PLUS_EXPR:
    case MINUS_EXPR:
      /* Do not expand arithmetic whose overflow would trap; folding the
	 expanded form could lose the trap.  */
      if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (expr))
	  && TYPE_OVERFLOW_TRAPS (TREE_TYPE (expr)))
	return expr;
      /* Fallthru.  */
    case POINTER_PLUS_EXPR:
      /* And increments and decrements by a constant are simple.  */
      e1 = gimple_assign_rhs2 (stmt);
      if (!is_gimple_min_invariant (e1))
	return expr;

      ee = expand_simple_operations (e, stop);
      return fold_build2 (code, TREE_TYPE (expr), ee, e1);

    default:
      return expr;
    }
}
/* Tries to simplify EXPR using the condition COND.  Returns the simplified
   expression (or EXPR unchanged, if no simplification was possible).  */

static tree
tree_simplify_using_condition_1 (tree cond, tree expr)
{
  bool changed;
  tree e, te, e0, e1, e2, notcond;
  enum tree_code code = TREE_CODE (expr);

  /* A constant is already as simple as it gets.  */
  if (code == INTEGER_CST)
    return expr;

  if (code == TRUTH_OR_EXPR
      || code == TRUTH_AND_EXPR
      || code == COND_EXPR)
    {
      /* Logical connective: simplify each operand separately and rebuild
	 EXPR only if something actually changed.  */
      changed = false;

      e0 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 0));
      if (TREE_OPERAND (expr, 0) != e0)
	changed = true;

      e1 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 1));
      if (TREE_OPERAND (expr, 1) != e1)
	changed = true;

      if (code == COND_EXPR)
	{
	  e2 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 2));
	  if (TREE_OPERAND (expr, 2) != e2)
	    changed = true;
	}
      else
	e2 = NULL_TREE;

      if (changed)
	{
	  if (code == COND_EXPR)
	    expr = fold_build3 (code, boolean_type_node, e0, e1, e2);
	  else
	    expr = fold_build2 (code, boolean_type_node, e0, e1);
	}

      return expr;
    }

  /* In case COND is equality, we may be able to simplify EXPR by copy/constant
     propagation, and vice versa.  Fold does not handle this, since it is
     considered too expensive.  */
  if (TREE_CODE (cond) == EQ_EXPR)
    {
      e0 = TREE_OPERAND (cond, 0);
      e1 = TREE_OPERAND (cond, 1);

      /* We know that e0 == e1.  Check whether we cannot simplify expr
	 using this fact.  Try substituting in both directions; only accept
	 the result when it folds to a constant truth value.  */
      e = simplify_replace_tree (expr, e0, e1);
      if (integer_zerop (e) || integer_nonzerop (e))
	return e;

      e = simplify_replace_tree (expr, e1, e0);
      if (integer_zerop (e) || integer_nonzerop (e))
	return e;
    }
  if (TREE_CODE (expr) == EQ_EXPR)
    {
      e0 = TREE_OPERAND (expr, 0);
      e1 = TREE_OPERAND (expr, 1);

      /* If e0 == e1 (EXPR) implies !COND, then EXPR cannot be true.
	 (COND is known to hold here, so EXPR folds to false.)  */
      e = simplify_replace_tree (cond, e0, e1);
      if (integer_zerop (e))
	return e;
      e = simplify_replace_tree (cond, e1, e0);
      if (integer_zerop (e))
	return e;
    }
  if (TREE_CODE (expr) == NE_EXPR)
    {
      e0 = TREE_OPERAND (expr, 0);
      e1 = TREE_OPERAND (expr, 1);

      /* If e0 == e1 (!EXPR) implies !COND, then EXPR must be true.  */
      e = simplify_replace_tree (cond, e0, e1);
      if (integer_zerop (e))
	return boolean_true_node;
      e = simplify_replace_tree (cond, e1, e0);
      if (integer_zerop (e))
	return boolean_true_node;
    }

  /* Expand EXPR so the implication checks below see through simple
     ssa-name definitions.  */
  te = expand_simple_operations (expr);

  /* Check whether COND ==> EXPR.  */
  notcond = invert_truthvalue (cond);
  e = fold_binary (TRUTH_OR_EXPR, boolean_type_node, notcond, te);
  if (e && integer_nonzerop (e))
    return e;

  /* Check whether COND ==> not EXPR.  */
  e = fold_binary (TRUTH_AND_EXPR, boolean_type_node, cond, te);
  if (e && integer_zerop (e))
    return e;

  return expr;
}
  1530. /* Tries to simplify EXPR using the condition COND. Returns the simplified
  1531. expression (or EXPR unchanged, if no simplification was possible).
  1532. Wrapper around tree_simplify_using_condition_1 that ensures that chains
  1533. of simple operations in definitions of ssa names in COND are expanded,
  1534. so that things like casts or incrementing the value of the bound before
  1535. the loop do not cause us to fail. */
  1536. static tree
  1537. tree_simplify_using_condition (tree cond, tree expr)
  1538. {
  1539. cond = expand_simple_operations (cond);
  1540. return tree_simplify_using_condition_1 (cond, expr);
  1541. }
/* Tries to simplify EXPR using the conditions on entry to LOOP.
   Returns the simplified expression (or EXPR unchanged, if no
   simplification was possible).  */

static tree
simplify_using_initial_conditions (struct loop *loop, tree expr)
{
  edge e;
  basic_block bb;
  gimple stmt;
  tree cond;
  int cnt = 0;

  if (TREE_CODE (expr) == INTEGER_CST)
    return expr;

  /* Limit walking the dominators to avoid quadraticness in
     the number of BBs times the number of loops in degenerate
     cases.  */
  for (bb = loop->header;
       bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) && cnt < MAX_DOMINATORS_TO_WALK;
       bb = get_immediate_dominator (CDI_DOMINATORS, bb))
    {
      /* Only a block entered through a single conditional edge carries a
	 condition that is known to hold whenever LOOP is entered.  */
      if (!single_pred_p (bb))
	continue;
      e = single_pred_edge (bb);

      if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
	continue;

      /* Reconstruct the condition under which edge E is taken; the edge
	 flag tells us which arm of the GIMPLE_COND it corresponds to.  */
      stmt = last_stmt (e->src);
      cond = fold_build2 (gimple_cond_code (stmt),
			  boolean_type_node,
			  gimple_cond_lhs (stmt),
			  gimple_cond_rhs (stmt));
      if (e->flags & EDGE_FALSE_VALUE)
	cond = invert_truthvalue (cond);
      expr = tree_simplify_using_condition (cond, expr);
      ++cnt;
    }

  return expr;
}
  1579. /* Tries to simplify EXPR using the evolutions of the loop invariants
  1580. in the superloops of LOOP. Returns the simplified expression
  1581. (or EXPR unchanged, if no simplification was possible). */
  1582. static tree
  1583. simplify_using_outer_evolutions (struct loop *loop, tree expr)
  1584. {
  1585. enum tree_code code = TREE_CODE (expr);
  1586. bool changed;
  1587. tree e, e0, e1, e2;
  1588. if (is_gimple_min_invariant (expr))
  1589. return expr;
  1590. if (code == TRUTH_OR_EXPR
  1591. || code == TRUTH_AND_EXPR
  1592. || code == COND_EXPR)
  1593. {
  1594. changed = false;
  1595. e0 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 0));
  1596. if (TREE_OPERAND (expr, 0) != e0)
  1597. changed = true;
  1598. e1 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 1));
  1599. if (TREE_OPERAND (expr, 1) != e1)
  1600. changed = true;
  1601. if (code == COND_EXPR)
  1602. {
  1603. e2 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 2));
  1604. if (TREE_OPERAND (expr, 2) != e2)
  1605. changed = true;
  1606. }
  1607. else
  1608. e2 = NULL_TREE;
  1609. if (changed)
  1610. {
  1611. if (code == COND_EXPR)
  1612. expr = fold_build3 (code, boolean_type_node, e0, e1, e2);
  1613. else
  1614. expr = fold_build2 (code, boolean_type_node, e0, e1);
  1615. }
  1616. return expr;
  1617. }
  1618. e = instantiate_parameters (loop, expr);
  1619. if (is_gimple_min_invariant (e))
  1620. return e;
  1621. return expr;
  1622. }
  1623. /* Returns true if EXIT is the only possible exit from LOOP. */
  1624. bool
  1625. loop_only_exit_p (const struct loop *loop, const_edge exit)
  1626. {
  1627. basic_block *body;
  1628. gimple_stmt_iterator bsi;
  1629. unsigned i;
  1630. gimple call;
  1631. if (exit != single_exit (loop))
  1632. return false;
  1633. body = get_loop_body (loop);
  1634. for (i = 0; i < loop->num_nodes; i++)
  1635. {
  1636. for (bsi = gsi_start_bb (body[i]); !gsi_end_p (bsi); gsi_next (&bsi))
  1637. {
  1638. call = gsi_stmt (bsi);
  1639. if (gimple_code (call) != GIMPLE_CALL)
  1640. continue;
  1641. if (gimple_has_side_effects (call))
  1642. {
  1643. free (body);
  1644. return false;
  1645. }
  1646. }
  1647. }
  1648. free (body);
  1649. return true;
  1650. }
/* Stores description of number of iterations of LOOP derived from
   EXIT (an exit edge of the LOOP) in NITER.  Returns true if some
   useful information could be derived (and fields of NITER has
   meaning described in comments at struct tree_niter_desc
   declaration), false otherwise.  If WARN is true and
   -Wunsafe-loop-optimizations was given, warn if the optimizer is going to use
   potentially unsafe assumptions.
   When EVERY_ITERATION is true, only tests that are known to be executed
   every iteration are considered (i.e. only test that alone bounds the loop).
 */

bool
number_of_iterations_exit (struct loop *loop, edge exit,
			   struct tree_niter_desc *niter,
			   bool warn, bool every_iteration)
{
  gimple last;
  gcond *stmt;
  tree type;
  tree op0, op1;
  enum tree_code code;
  affine_iv iv0, iv1;
  bool safe;

  /* The exit test is executed on every iteration iff its source block
     dominates the latch.  */
  safe = dominated_by_p (CDI_DOMINATORS, loop->latch, exit->src);

  if (every_iteration && !safe)
    return false;

  niter->assumptions = boolean_false_node;
  last = last_stmt (exit->src);
  if (!last)
    return false;
  stmt = dyn_cast <gcond *> (last);
  if (!stmt)
    return false;

  /* We want the condition for staying inside loop.  */
  code = gimple_cond_code (stmt);
  if (exit->flags & EDGE_TRUE_VALUE)
    code = invert_tree_comparison (code, false);

  /* Only ordered comparisons and inequality are handled; EQ_EXPR as the
     stay-inside condition is rejected.  */
  switch (code)
    {
    case GT_EXPR:
    case GE_EXPR:
    case LT_EXPR:
    case LE_EXPR:
    case NE_EXPR:
      break;

    default:
      return false;
    }

  op0 = gimple_cond_lhs (stmt);
  op1 = gimple_cond_rhs (stmt);
  type = TREE_TYPE (op0);

  if (TREE_CODE (type) != INTEGER_TYPE
      && !POINTER_TYPE_P (type))
    return false;

  /* Both operands must be simple affine induction variables (or loop
     invariants) with respect to LOOP.  */
  if (!simple_iv (loop, loop_containing_stmt (stmt), op0, &iv0, false))
    return false;
  if (!simple_iv (loop, loop_containing_stmt (stmt), op1, &iv1, false))
    return false;

  /* We don't want to see undefined signed overflow warnings while
     computing the number of iterations.  */
  fold_defer_overflow_warnings ();

  iv0.base = expand_simple_operations (iv0.base);
  iv1.base = expand_simple_operations (iv1.base);
  if (!number_of_iterations_cond (loop, type, &iv0, code, &iv1, niter,
				  loop_only_exit_p (loop, exit), safe))
    {
      /* Re-enable warnings before bailing out.  */
      fold_undefer_and_ignore_overflow_warnings ();
      return false;
    }

  /* At -O3 and above, additionally try simplifying the results using
     evolutions in the enclosing loops.  */
  if (optimize >= 3)
    {
      niter->assumptions = simplify_using_outer_evolutions (loop,
							    niter->assumptions);
      niter->may_be_zero = simplify_using_outer_evolutions (loop,
							    niter->may_be_zero);
      niter->niter = simplify_using_outer_evolutions (loop, niter->niter);
    }

  niter->assumptions
	  = simplify_using_initial_conditions (loop,
					       niter->assumptions);
  niter->may_be_zero
	  = simplify_using_initial_conditions (loop,
					       niter->may_be_zero);

  fold_undefer_and_ignore_overflow_warnings ();

  /* If NITER has simplified into a constant, update MAX.  */
  if (TREE_CODE (niter->niter) == INTEGER_CST)
    niter->max = wi::to_widest (niter->niter);

  if (integer_onep (niter->assumptions))
    return true;

  /* With -funsafe-loop-optimizations we assume that nothing bad can happen.
     But if we can prove that there is overflow or some other source of weird
     behavior, ignore the loop even with -funsafe-loop-optimizations.  */
  if (integer_zerop (niter->assumptions) || !single_exit (loop))
    return false;

  if (flag_unsafe_loop_optimizations)
    niter->assumptions = boolean_true_node;

  if (warn)
    {
      const char *wording;
      location_t loc = gimple_location (stmt);

      /* We can provide a more specific warning if one of the operator is
	 constant and the other advances by +1 or -1.  */
      if (!integer_zerop (iv1.step)
	  ? (integer_zerop (iv0.step)
	     && (integer_onep (iv1.step) || integer_all_onesp (iv1.step)))
	  : (integer_onep (iv0.step) || integer_all_onesp (iv0.step)))
        wording =
          flag_unsafe_loop_optimizations
          ? N_("assuming that the loop is not infinite")
          : N_("cannot optimize possibly infinite loops");
      else
	wording =
	  flag_unsafe_loop_optimizations
	  ? N_("assuming that the loop counter does not overflow")
	  : N_("cannot optimize loop, the loop counter may overflow");

      warning_at ((LOCATION_LINE (loc) > 0) ? loc : input_location,
		  OPT_Wunsafe_loop_optimizations, "%s", gettext (wording));
    }

  return flag_unsafe_loop_optimizations;
}
  1770. /* Try to determine the number of iterations of LOOP. If we succeed,
  1771. expression giving number of iterations is returned and *EXIT is
  1772. set to the edge from that the information is obtained. Otherwise
  1773. chrec_dont_know is returned. */
  1774. tree
  1775. find_loop_niter (struct loop *loop, edge *exit)
  1776. {
  1777. unsigned i;
  1778. vec<edge> exits = get_loop_exit_edges (loop);
  1779. edge ex;
  1780. tree niter = NULL_TREE, aniter;
  1781. struct tree_niter_desc desc;
  1782. *exit = NULL;
  1783. FOR_EACH_VEC_ELT (exits, i, ex)
  1784. {
  1785. if (!number_of_iterations_exit (loop, ex, &desc, false))
  1786. continue;
  1787. if (integer_nonzerop (desc.may_be_zero))
  1788. {
  1789. /* We exit in the first iteration through this exit.
  1790. We won't find anything better. */
  1791. niter = build_int_cst (unsigned_type_node, 0);
  1792. *exit = ex;
  1793. break;
  1794. }
  1795. if (!integer_zerop (desc.may_be_zero))
  1796. continue;
  1797. aniter = desc.niter;
  1798. if (!niter)
  1799. {
  1800. /* Nothing recorded yet. */
  1801. niter = aniter;
  1802. *exit = ex;
  1803. continue;
  1804. }
  1805. /* Prefer constants, the lower the better. */
  1806. if (TREE_CODE (aniter) != INTEGER_CST)
  1807. continue;
  1808. if (TREE_CODE (niter) != INTEGER_CST)
  1809. {
  1810. niter = aniter;
  1811. *exit = ex;
  1812. continue;
  1813. }
  1814. if (tree_int_cst_lt (aniter, niter))
  1815. {
  1816. niter = aniter;
  1817. *exit = ex;
  1818. continue;
  1819. }
  1820. }
  1821. exits.release ();
  1822. return niter ? niter : chrec_dont_know;
  1823. }
  1824. /* Return true if loop is known to have bounded number of iterations. */
  1825. bool
  1826. finite_loop_p (struct loop *loop)
  1827. {
  1828. widest_int nit;
  1829. int flags;
  1830. if (flag_unsafe_loop_optimizations)
  1831. return true;
  1832. flags = flags_from_decl_or_type (current_function_decl);
  1833. if ((flags & (ECF_CONST|ECF_PURE)) && !(flags & ECF_LOOPING_CONST_OR_PURE))
  1834. {
  1835. if (dump_file && (dump_flags & TDF_DETAILS))
  1836. fprintf (dump_file, "Found loop %i to be finite: it is within pure or const function.\n",
  1837. loop->num);
  1838. return true;
  1839. }
  1840. if (loop->any_upper_bound
  1841. || max_loop_iterations (loop, &nit))
  1842. {
  1843. if (dump_file && (dump_flags & TDF_DETAILS))
  1844. fprintf (dump_file, "Found loop %i to be finite: upper bound found.\n",
  1845. loop->num);
  1846. return true;
  1847. }
  1848. return false;
  1849. }
  1850. /*
  1851. Analysis of a number of iterations of a loop by a brute-force evaluation.
  1852. */
  1853. /* Bound on the number of iterations we try to evaluate. */
  1854. #define MAX_ITERATIONS_TO_TRACK \
  1855. ((unsigned) PARAM_VALUE (PARAM_MAX_ITERATIONS_TO_TRACK))
/* Returns the loop phi node of LOOP such that ssa name X is derived from its
   result by a chain of operations such that all but exactly one of their
   operands are constants.  */

static gphi *
chain_of_csts_start (struct loop *loop, tree x)
{
  gimple stmt = SSA_NAME_DEF_STMT (x);
  tree use;
  basic_block bb = gimple_bb (stmt);
  enum tree_code code;

  /* X must be defined inside LOOP.  */
  if (!bb
      || !flow_bb_inside_loop_p (loop, bb))
    return NULL;

  if (gimple_code (stmt) == GIMPLE_PHI)
    {
      /* Only a phi node in the loop header can start the chain.  */
      if (bb == loop->header)
	return as_a <gphi *> (stmt);

      return NULL;
    }

  if (gimple_code (stmt) != GIMPLE_ASSIGN
      || gimple_assign_rhs_class (stmt) == GIMPLE_TERNARY_RHS)
    return NULL;

  code = gimple_assign_rhs_code (stmt);
  /* Reject statements whose value may depend on memory contents, since
     those cannot be evaluated by constant folding alone.  */
  if (gimple_references_memory_p (stmt)
      || TREE_CODE_CLASS (code) == tcc_reference
      || (code == ADDR_EXPR
	  && !is_gimple_min_invariant (gimple_assign_rhs1 (stmt))))
    return NULL;

  /* The statement must use exactly one SSA name; every other operand is
     then a constant, so keep following that single use up the chain.  */
  use = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_USE);
  if (use == NULL_TREE)
    return NULL;

  return chain_of_csts_start (loop, use);
}
  1889. /* Determines whether the expression X is derived from a result of a phi node
  1890. in header of LOOP such that
  1891. * the derivation of X consists only from operations with constants
  1892. * the initial value of the phi node is constant
  1893. * the value of the phi node in the next iteration can be derived from the
  1894. value in the current iteration by a chain of operations with constants.
  1895. If such phi node exists, it is returned, otherwise NULL is returned. */
  1896. static gphi *
  1897. get_base_for (struct loop *loop, tree x)
  1898. {
  1899. gphi *phi;
  1900. tree init, next;
  1901. if (is_gimple_min_invariant (x))
  1902. return NULL;
  1903. phi = chain_of_csts_start (loop, x);
  1904. if (!phi)
  1905. return NULL;
  1906. init = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
  1907. next = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
  1908. if (TREE_CODE (next) != SSA_NAME)
  1909. return NULL;
  1910. if (!is_gimple_min_invariant (init))
  1911. return NULL;
  1912. if (chain_of_csts_start (loop, next) != phi)
  1913. return NULL;
  1914. return phi;
  1915. }
/* Given an expression X, then

   * if X is NULL_TREE, we return the constant BASE.
   * otherwise X is a SSA name, whose value in the considered loop is derived
     by a chain of operations with constant from a result of a phi node in
     the header of the loop.  Then we return value of X when the value of the
     result of this phi node is given by the constant BASE.  */

static tree
get_val_for (tree x, tree base)
{
  gimple stmt;

  gcc_checking_assert (is_gimple_min_invariant (base));

  if (!x)
    return base;

  stmt = SSA_NAME_DEF_STMT (x);
  /* Reaching the header phi node means the whole chain has been walked;
     its value in this iteration is BASE.  */
  if (gimple_code (stmt) == GIMPLE_PHI)
    return base;

  gcc_checking_assert (is_gimple_assign (stmt));

  /* STMT must be either an assignment of a single SSA name or an
     expression involving an SSA name and a constant.  Try to fold that
     expression using the value for the SSA name.  */
  if (gimple_assign_ssa_name_copy_p (stmt))
    return get_val_for (gimple_assign_rhs1 (stmt), base);
  else if (gimple_assign_rhs_class (stmt) == GIMPLE_UNARY_RHS
	   && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
    {
      return fold_build1 (gimple_assign_rhs_code (stmt),
			  gimple_expr_type (stmt),
			  get_val_for (gimple_assign_rhs1 (stmt), base));
    }
  else if (gimple_assign_rhs_class (stmt) == GIMPLE_BINARY_RHS)
    {
      tree rhs1 = gimple_assign_rhs1 (stmt);
      tree rhs2 = gimple_assign_rhs2 (stmt);
      /* Exactly one operand continues the SSA chain (guaranteed by
	 chain_of_csts_start); substitute its value recursively.  */
      if (TREE_CODE (rhs1) == SSA_NAME)
	rhs1 = get_val_for (rhs1, base);
      else if (TREE_CODE (rhs2) == SSA_NAME)
	rhs2 = get_val_for (rhs2, base);
      else
	gcc_unreachable ();
      return fold_build2 (gimple_assign_rhs_code (stmt),
			  gimple_expr_type (stmt), rhs1, rhs2);
    }
  else
    gcc_unreachable ();
}
/* Tries to count the number of iterations of LOOP till it exits by EXIT
   by brute force -- i.e. by determining the value of the operands of the
   condition at EXIT in first few iterations of the loop (assuming that
   these values are constant) and determining the first one in that the
   condition is not satisfied.  Returns the constant giving the number
   of the iterations of LOOP if successful, chrec_dont_know otherwise.  */

tree
loop_niter_by_eval (struct loop *loop, edge exit)
{
  tree acnd;
  tree op[2], val[2], next[2], aval[2];
  gphi *phi;
  gimple cond;
  unsigned i, j;
  enum tree_code cmp;

  cond = last_stmt (exit->src);
  if (!cond || gimple_code (cond) != GIMPLE_COND)
    return chrec_dont_know;

  /* CMP becomes the condition for staying inside the loop.  */
  cmp = gimple_cond_code (cond);
  if (exit->flags & EDGE_TRUE_VALUE)
    cmp = invert_tree_comparison (cmp, false);

  switch (cmp)
    {
    case EQ_EXPR:
    case NE_EXPR:
    case GT_EXPR:
    case GE_EXPR:
    case LT_EXPR:
    case LE_EXPR:
      op[0] = gimple_cond_lhs (cond);
      op[1] = gimple_cond_rhs (cond);
      break;

    default:
      return chrec_dont_know;
    }

  /* For each operand, find the header phi node it is derived from
     (or record it directly if it is invariant).  */
  for (j = 0; j < 2; j++)
    {
      if (is_gimple_min_invariant (op[j]))
	{
	  val[j] = op[j];
	  next[j] = NULL_TREE;
	  op[j] = NULL_TREE;
	}
      else
	{
	  phi = get_base_for (loop, op[j]);
	  if (!phi)
	    return chrec_dont_know;
	  val[j] = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
	  next[j] = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
	}
    }

  /* Don't issue signed overflow warnings.  */
  fold_defer_overflow_warnings ();

  /* Simulate up to MAX_ITERATIONS_TO_TRACK iterations, evaluating the
     exit condition on the values the operands take in each one.  */
  for (i = 0; i < MAX_ITERATIONS_TO_TRACK; i++)
    {
      for (j = 0; j < 2; j++)
	aval[j] = get_val_for (op[j], val[j]);

      acnd = fold_binary (cmp, boolean_type_node, aval[0], aval[1]);
      if (acnd && integer_zerop (acnd))
	{
	  /* The stay-inside condition failed: the loop exits after
	     exactly I iterations.  */
	  fold_undefer_and_ignore_overflow_warnings ();
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file,
		     "Proved that loop %d iterates %d times using brute force.\n",
		     loop->num, i);
	  return build_int_cst (unsigned_type_node, i);
	}

      /* Advance the phi values to the next iteration; give up if they
	 stop being constants.  */
      for (j = 0; j < 2; j++)
	{
	  val[j] = get_val_for (next[j], val[j]);
	  if (!is_gimple_min_invariant (val[j]))
	    {
	      fold_undefer_and_ignore_overflow_warnings ();
	      return chrec_dont_know;
	    }
	}
    }

  fold_undefer_and_ignore_overflow_warnings ();

  return chrec_dont_know;
}
  2042. /* Finds the exit of the LOOP by that the loop exits after a constant
  2043. number of iterations and stores the exit edge to *EXIT. The constant
  2044. giving the number of iterations of LOOP is returned. The number of
  2045. iterations is determined using loop_niter_by_eval (i.e. by brute force
  2046. evaluation). If we are unable to find the exit for that loop_niter_by_eval
  2047. determines the number of iterations, chrec_dont_know is returned. */
  2048. tree
  2049. find_loop_niter_by_eval (struct loop *loop, edge *exit)
  2050. {
  2051. unsigned i;
  2052. vec<edge> exits = get_loop_exit_edges (loop);
  2053. edge ex;
  2054. tree niter = NULL_TREE, aniter;
  2055. *exit = NULL;
  2056. /* Loops with multiple exits are expensive to handle and less important. */
  2057. if (!flag_expensive_optimizations
  2058. && exits.length () > 1)
  2059. {
  2060. exits.release ();
  2061. return chrec_dont_know;
  2062. }
  2063. FOR_EACH_VEC_ELT (exits, i, ex)
  2064. {
  2065. if (!just_once_each_iteration_p (loop, ex->src))
  2066. continue;
  2067. aniter = loop_niter_by_eval (loop, ex);
  2068. if (chrec_contains_undetermined (aniter))
  2069. continue;
  2070. if (niter
  2071. && !tree_int_cst_lt (aniter, niter))
  2072. continue;
  2073. niter = aniter;
  2074. *exit = ex;
  2075. }
  2076. exits.release ();
  2077. return niter ? niter : chrec_dont_know;
  2078. }
  2079. /*
  2080. Analysis of upper bounds on number of iterations of a loop.
  2081. */
  2082. static widest_int derive_constant_upper_bound_ops (tree, tree,
  2083. enum tree_code, tree);
  2084. /* Returns a constant upper bound on the value of the right-hand side of
  2085. an assignment statement STMT. */
  2086. static widest_int
  2087. derive_constant_upper_bound_assign (gimple stmt)
  2088. {
  2089. enum tree_code code = gimple_assign_rhs_code (stmt);
  2090. tree op0 = gimple_assign_rhs1 (stmt);
  2091. tree op1 = gimple_assign_rhs2 (stmt);
  2092. return derive_constant_upper_bound_ops (TREE_TYPE (gimple_assign_lhs (stmt)),
  2093. op0, code, op1);
  2094. }
  2095. /* Returns a constant upper bound on the value of expression VAL. VAL
  2096. is considered to be unsigned. If its type is signed, its value must
  2097. be nonnegative. */
  2098. static widest_int
  2099. derive_constant_upper_bound (tree val)
  2100. {
  2101. enum tree_code code;
  2102. tree op0, op1;
  2103. extract_ops_from_tree (val, &code, &op0, &op1);
  2104. return derive_constant_upper_bound_ops (TREE_TYPE (val), op0, code, op1);
  2105. }
  2106. /* Returns a constant upper bound on the value of expression OP0 CODE OP1,
  2107. whose type is TYPE. The expression is considered to be unsigned. If
  2108. its type is signed, its value must be nonnegative. */
  2109. static widest_int
  2110. derive_constant_upper_bound_ops (tree type, tree op0,
  2111. enum tree_code code, tree op1)
  2112. {
  2113. tree subtype, maxt;
  2114. widest_int bnd, max, mmax, cst;
  2115. gimple stmt;
  2116. if (INTEGRAL_TYPE_P (type))
  2117. maxt = TYPE_MAX_VALUE (type);
  2118. else
  2119. maxt = upper_bound_in_type (type, type);
  2120. max = wi::to_widest (maxt);
  2121. switch (code)
  2122. {
  2123. case INTEGER_CST:
  2124. return wi::to_widest (op0);
  2125. CASE_CONVERT:
  2126. subtype = TREE_TYPE (op0);
  2127. if (!TYPE_UNSIGNED (subtype)
  2128. /* If TYPE is also signed, the fact that VAL is nonnegative implies
  2129. that OP0 is nonnegative. */
  2130. && TYPE_UNSIGNED (type)
  2131. && !tree_expr_nonnegative_p (op0))
  2132. {
  2133. /* If we cannot prove that the casted expression is nonnegative,
  2134. we cannot establish more useful upper bound than the precision
  2135. of the type gives us. */
  2136. return max;
  2137. }
  2138. /* We now know that op0 is an nonnegative value. Try deriving an upper
  2139. bound for it. */
  2140. bnd = derive_constant_upper_bound (op0);
  2141. /* If the bound does not fit in TYPE, max. value of TYPE could be
  2142. attained. */
  2143. if (wi::ltu_p (max, bnd))
  2144. return max;
  2145. return bnd;
  2146. case PLUS_EXPR:
  2147. case POINTER_PLUS_EXPR:
  2148. case MINUS_EXPR:
  2149. if (TREE_CODE (op1) != INTEGER_CST
  2150. || !tree_expr_nonnegative_p (op0))
  2151. return max;
  2152. /* Canonicalize to OP0 - CST. Consider CST to be signed, in order to
  2153. choose the most logical way how to treat this constant regardless
  2154. of the signedness of the type. */
  2155. cst = wi::sext (wi::to_widest (op1), TYPE_PRECISION (type));
  2156. if (code != MINUS_EXPR)
  2157. cst = -cst;
  2158. bnd = derive_constant_upper_bound (op0);
  2159. if (wi::neg_p (cst))
  2160. {
  2161. cst = -cst;
  2162. /* Avoid CST == 0x80000... */
  2163. if (wi::neg_p (cst))
  2164. return max;;
  2165. /* OP0 + CST. We need to check that
  2166. BND <= MAX (type) - CST. */
  2167. mmax -= cst;
  2168. if (wi::ltu_p (bnd, max))
  2169. return max;
  2170. return bnd + cst;
  2171. }
  2172. else
  2173. {
  2174. /* OP0 - CST, where CST >= 0.
  2175. If TYPE is signed, we have already verified that OP0 >= 0, and we
  2176. know that the result is nonnegative. This implies that
  2177. VAL <= BND - CST.
  2178. If TYPE is unsigned, we must additionally know that OP0 >= CST,
  2179. otherwise the operation underflows.
  2180. */
  2181. /* This should only happen if the type is unsigned; however, for
  2182. buggy programs that use overflowing signed arithmetics even with
  2183. -fno-wrapv, this condition may also be true for signed values. */
  2184. if (wi::ltu_p (bnd, cst))
  2185. return max;
  2186. if (TYPE_UNSIGNED (type))
  2187. {
  2188. tree tem = fold_binary (GE_EXPR, boolean_type_node, op0,
  2189. wide_int_to_tree (type, cst));
  2190. if (!tem || integer_nonzerop (tem))
  2191. return max;
  2192. }
  2193. bnd -= cst;
  2194. }
  2195. return bnd;
  2196. case FLOOR_DIV_EXPR:
  2197. case EXACT_DIV_EXPR:
  2198. if (TREE_CODE (op1) != INTEGER_CST
  2199. || tree_int_cst_sign_bit (op1))
  2200. return max;
  2201. bnd = derive_constant_upper_bound (op0);
  2202. return wi::udiv_floor (bnd, wi::to_widest (op1));
  2203. case BIT_AND_EXPR:
  2204. if (TREE_CODE (op1) != INTEGER_CST
  2205. || tree_int_cst_sign_bit (op1))
  2206. return max;
  2207. return wi::to_widest (op1);
  2208. case SSA_NAME:
  2209. stmt = SSA_NAME_DEF_STMT (op0);
  2210. if (gimple_code (stmt) != GIMPLE_ASSIGN
  2211. || gimple_assign_lhs (stmt) != op0)
  2212. return max;
  2213. return derive_constant_upper_bound_assign (stmt);
  2214. default:
  2215. return max;
  2216. }
  2217. }
/* Emit a -Waggressive-loop-optimizations warning if needed, telling the user
   that iteration I_BOUND of LOOP makes STMT invoke undefined behavior even
   though the loop is known to iterate more often.  */

static void
do_warn_aggressive_loop_optimizations (struct loop *loop,
				       widest_int i_bound, gimple stmt)
{
  /* Don't warn if the loop doesn't have known constant bound.  */
  if (!loop->nb_iterations
      || TREE_CODE (loop->nb_iterations) != INTEGER_CST
      || !warn_aggressive_loop_optimizations
      /* To avoid warning multiple times for the same loop,
	 only start warning when we preserve loops.  */
      || (cfun->curr_properties & PROP_loops) == 0
      /* Only warn once per loop.  */
      || loop->warned_aggressive_loop_optimizations
      /* Only warn if undefined behavior gives us lower estimate than the
	 known constant bound.  */
      || wi::cmpu (i_bound, wi::to_widest (loop->nb_iterations)) >= 0
      /* And undefined behavior happens unconditionally.  */
      || !dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (stmt)))
    return;

  /* The "containing loop" note points at the single exit's controlling
     statement, so give up for multi-exit loops.  */
  edge e = single_exit (loop);
  if (e == NULL)
    return;

  gimple estmt = last_stmt (e->src);
  if (warning_at (gimple_location (stmt), OPT_Waggressive_loop_optimizations,
		  "iteration %E invokes undefined behavior",
		  wide_int_to_tree (TREE_TYPE (loop->nb_iterations),
				    i_bound)))
    inform (gimple_location (estmt), "containing loop");
  /* Remember we warned, so the diagnostic is issued at most once per loop.  */
  loop->warned_aggressive_loop_optimizations = true;
}
/* Records that AT_STMT is executed at most BOUND + 1 times in LOOP.  IS_EXIT
   is true if the loop is exited immediately after STMT, and this exit
   is taken at last when the STMT is executed BOUND + 1 times.
   REALISTIC is true if BOUND is expected to be close to the real number
   of iterations.  UPPER is true if we are sure the loop iterates at most
   BOUND times.  I_BOUND is a widest_int upper estimate on BOUND.  */

static void
record_estimate (struct loop *loop, tree bound, const widest_int &i_bound,
		 gimple at_stmt, bool is_exit, bool realistic, bool upper)
{
  widest_int delta;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Statement %s", is_exit ? "(exit)" : "");
      print_gimple_stmt (dump_file, at_stmt, 0, TDF_SLIM);
      fprintf (dump_file, " is %sexecuted at most ",
	       upper ? "" : "probably ");
      print_generic_expr (dump_file, bound, TDF_SLIM);
      fprintf (dump_file, " (bounded by ");
      print_decu (i_bound, dump_file);
      fprintf (dump_file, ") + 1 times in loop %d.\n", loop->num);
    }

  /* If the I_BOUND is just an estimate of BOUND, it rarely is close to the
     real number of iterations.  */
  if (TREE_CODE (bound) != INTEGER_CST)
    realistic = false;
  else
    /* For a constant BOUND the caller must pass a matching I_BOUND.  */
    gcc_checking_assert (i_bound == wi::to_widest (bound));
  if (!upper && !realistic)
    return;

  /* If we have a guaranteed upper bound, record it in the appropriate
     list, unless this is an !is_exit bound (i.e. undefined behavior in
     at_stmt) in a loop with known constant number of iterations.  */
  if (upper
      && (is_exit
	  || loop->nb_iterations == NULL_TREE
	  || TREE_CODE (loop->nb_iterations) != INTEGER_CST))
    {
      struct nb_iter_bound *elt = ggc_alloc<nb_iter_bound> ();

      /* Prepend the new bound record to LOOP's bound list.  */
      elt->bound = i_bound;
      elt->stmt = at_stmt;
      elt->is_exit = is_exit;
      elt->next = loop->bounds;
      loop->bounds = elt;
    }

  /* If statement is executed on every path to the loop latch, we can directly
     infer the upper bound on the # of iterations of the loop.  */
  if (!dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (at_stmt)))
    return;

  /* Update the number of iteration estimates according to the bound.
     If at_stmt is an exit then the loop latch is executed at most BOUND times,
     otherwise it can be executed BOUND + 1 times.  We will lower the estimate
     later if such statement must be executed on last iteration  */
  if (is_exit)
    delta = 0;
  else
    delta = 1;
  widest_int new_i_bound = i_bound + delta;

  /* If an overflow occurred, ignore the result.  */
  if (wi::ltu_p (new_i_bound, delta))
    return;

  if (upper && !is_exit)
    do_warn_aggressive_loop_optimizations (loop, new_i_bound, at_stmt);
  record_niter_bound (loop, new_i_bound, realistic, upper);
}
/* Record the estimate on number of iterations of LOOP based on the fact that
   the induction variable BASE + STEP * i evaluated in STMT does not wrap and
   its values belong to the range <LOW, HIGH>.  REALISTIC is true if the
   estimated number of iterations is expected to be close to the real one.
   UPPER is true if we are sure the induction variable does not wrap.  */

static void
record_nonwrapping_iv (struct loop *loop, tree base, tree step, gimple stmt,
		       tree low, tree high, bool realistic, bool upper)
{
  tree niter_bound, extreme, delta;
  tree type = TREE_TYPE (base), unsigned_type;
  tree orig_base = base;

  /* Nothing can be derived without a known nonzero constant step.  */
  if (TREE_CODE (step) != INTEGER_CST || integer_zerop (step))
    return;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Induction variable (");
      print_generic_expr (dump_file, TREE_TYPE (base), TDF_SLIM);
      fprintf (dump_file, ") ");
      print_generic_expr (dump_file, base, TDF_SLIM);
      fprintf (dump_file, " + ");
      print_generic_expr (dump_file, step, TDF_SLIM);
      fprintf (dump_file, " * iteration does not wrap in statement ");
      print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
      fprintf (dump_file, " in loop %d.\n", loop->num);
    }

  /* Compute the iteration distance in the corresponding unsigned type so
     the subtractions below are well defined.  */
  unsigned_type = unsigned_type_for (type);
  base = fold_convert (unsigned_type, base);
  step = fold_convert (unsigned_type, step);

  if (tree_int_cst_sign_bit (step))
    {
      wide_int min, max;

      /* Decreasing IV: it can make at most (BASE - LOW) / |STEP| steps.
	 If BASE is not a constant, use range information on ORIG_BASE, or
	 fall back to HIGH as a conservative stand-in for it.  */
      extreme = fold_convert (unsigned_type, low);
      if (TREE_CODE (orig_base) == SSA_NAME
	  && TREE_CODE (high) == INTEGER_CST
	  && INTEGRAL_TYPE_P (TREE_TYPE (orig_base))
	  && get_range_info (orig_base, &min, &max) == VR_RANGE
	  && wi::gts_p (high, max))
	base = wide_int_to_tree (unsigned_type, max);
      else if (TREE_CODE (base) != INTEGER_CST)
	base = fold_convert (unsigned_type, high);
      delta = fold_build2 (MINUS_EXPR, unsigned_type, base, extreme);
      /* Negate the step so the division below uses its magnitude.  */
      step = fold_build1 (NEGATE_EXPR, unsigned_type, step);
    }
  else
    {
      wide_int min, max;

      /* Increasing IV: it can make at most (HIGH - BASE) / STEP steps.  */
      extreme = fold_convert (unsigned_type, high);
      if (TREE_CODE (orig_base) == SSA_NAME
	  && TREE_CODE (low) == INTEGER_CST
	  && INTEGRAL_TYPE_P (TREE_TYPE (orig_base))
	  && get_range_info (orig_base, &min, &max) == VR_RANGE
	  && wi::gts_p (min, low))
	base = wide_int_to_tree (unsigned_type, min);
      else if (TREE_CODE (base) != INTEGER_CST)
	base = fold_convert (unsigned_type, low);
      delta = fold_build2 (MINUS_EXPR, unsigned_type, extreme, base);
    }

  /* STMT is executed at most NITER_BOUND + 1 times, since otherwise the value
     would get out of the range.  */
  niter_bound = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step);
  widest_int max = derive_constant_upper_bound (niter_bound);
  record_estimate (loop, niter_bound, max, stmt, false, realistic, upper);
}
/* Determine information about number of iterations a LOOP from the index
   IDX of a data reference accessed in STMT.  RELIABLE is true if STMT is
   guaranteed to be executed in every iteration of LOOP.  Callback for
   for_each_index.  */

/* Data passed from infer_loop_bounds_from_ref to the for_each_index
   callback idx_infer_loop_bounds.  */
struct ilb_data
{
  struct loop *loop;	/* Loop whose iteration count is being bounded.  */
  gimple stmt;		/* Statement containing the analyzed reference.  */
};
/* for_each_index callback: derive iteration bounds for the loop in DTA
   from the ARRAY_REF BASE indexed by *IDX.  Always returns true so the
   index walk continues.  */

static bool
idx_infer_loop_bounds (tree base, tree *idx, void *dta)
{
  struct ilb_data *data = (struct ilb_data *) dta;
  tree ev, init, step;
  tree low, high, type, next;
  bool sign, upper = true, at_end = false;
  struct loop *loop = data->loop;
  bool reliable = true;

  /* Only array references carry usable bound information here.  */
  if (TREE_CODE (base) != ARRAY_REF)
    return true;

  /* For arrays at the end of the structure, we are not guaranteed that they
     do not really extend over their declared size.  However, for arrays of
     size greater than one, this is unlikely to be intended.  */
  if (array_at_struct_end_p (base))
    {
      at_end = true;
      upper = false;
    }

  struct loop *dloop = loop_containing_stmt (data->stmt);
  if (!dloop)
    return true;

  /* Analyze the evolution of the index and extract its initial value and
     step with respect to LOOP.  */
  ev = analyze_scalar_evolution (dloop, *idx);
  ev = instantiate_parameters (loop, ev);
  init = initial_condition (ev);
  step = evolution_part_in_loop_num (ev, loop->num);

  if (!init
      || !step
      || TREE_CODE (step) != INTEGER_CST
      || integer_zerop (step)
      || tree_contains_chrecs (init, NULL)
      || chrec_contains_symbols_defined_in_loop (init, loop->num))
    return true;

  low = array_ref_low_bound (base);
  high = array_ref_up_bound (base);

  /* The case of nonconstant bounds could be handled, but it would be
     complicated.  */
  if (TREE_CODE (low) != INTEGER_CST
      || !high
      || TREE_CODE (high) != INTEGER_CST)
    return true;
  sign = tree_int_cst_sign_bit (step);
  type = TREE_TYPE (step);

  /* The array of length 1 at the end of a structure most likely extends
     beyond its bounds.  */
  if (at_end
      && operand_equal_p (low, high, 0))
    return true;

  /* In case the relevant bound of the array does not fit in type, or
     it does, but bound + step (in type) still belongs into the range of the
     array, the index may wrap and still stay within the range of the array
     (consider e.g. if the array is indexed by the full range of
     unsigned char).

     To make things simpler, we require both bounds to fit into type, although
     there are cases where this would not be strictly necessary.  */
  if (!int_fits_type_p (high, type)
      || !int_fits_type_p (low, type))
    return true;
  low = fold_convert (type, low);
  high = fold_convert (type, high);

  /* NEXT is the relevant array bound advanced by one step; if it still
     lies inside [LOW, HIGH], the index may legitimately wrap, so give up.  */
  if (sign)
    next = fold_binary (PLUS_EXPR, type, low, step);
  else
    next = fold_binary (PLUS_EXPR, type, high, step);

  if (tree_int_cst_compare (low, next) <= 0
      && tree_int_cst_compare (next, high) <= 0)
    return true;

  /* If access is not executed on every iteration, we must ensure that overflow
     may not make the access valid later.  */
  if (!dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (data->stmt))
      && scev_probably_wraps_p (initial_condition_in_loop_num (ev, loop->num),
				step, data->stmt, loop, true))
    reliable = false;

  record_nonwrapping_iv (loop, init, step, data->stmt, low, high, reliable, upper);
  return true;
}
  2463. /* Determine information about number of iterations a LOOP from the bounds
  2464. of arrays in the data reference REF accessed in STMT. RELIABLE is true if
  2465. STMT is guaranteed to be executed in every iteration of LOOP.*/
  2466. static void
  2467. infer_loop_bounds_from_ref (struct loop *loop, gimple stmt, tree ref)
  2468. {
  2469. struct ilb_data data;
  2470. data.loop = loop;
  2471. data.stmt = stmt;
  2472. for_each_index (&ref, idx_infer_loop_bounds, &data);
  2473. }
  2474. /* Determine information about number of iterations of a LOOP from the way
  2475. arrays are used in STMT. RELIABLE is true if STMT is guaranteed to be
  2476. executed in every iteration of LOOP. */
  2477. static void
  2478. infer_loop_bounds_from_array (struct loop *loop, gimple stmt)
  2479. {
  2480. if (is_gimple_assign (stmt))
  2481. {
  2482. tree op0 = gimple_assign_lhs (stmt);
  2483. tree op1 = gimple_assign_rhs1 (stmt);
  2484. /* For each memory access, analyze its access function
  2485. and record a bound on the loop iteration domain. */
  2486. if (REFERENCE_CLASS_P (op0))
  2487. infer_loop_bounds_from_ref (loop, stmt, op0);
  2488. if (REFERENCE_CLASS_P (op1))
  2489. infer_loop_bounds_from_ref (loop, stmt, op1);
  2490. }
  2491. else if (is_gimple_call (stmt))
  2492. {
  2493. tree arg, lhs;
  2494. unsigned i, n = gimple_call_num_args (stmt);
  2495. lhs = gimple_call_lhs (stmt);
  2496. if (lhs && REFERENCE_CLASS_P (lhs))
  2497. infer_loop_bounds_from_ref (loop, stmt, lhs);
  2498. for (i = 0; i < n; i++)
  2499. {
  2500. arg = gimple_call_arg (stmt, i);
  2501. if (REFERENCE_CLASS_P (arg))
  2502. infer_loop_bounds_from_ref (loop, stmt, arg);
  2503. }
  2504. }
  2505. }
/* Determine information about number of iterations of a LOOP from the fact
   that pointer arithmetics in STMT does not overflow.  */

static void
infer_loop_bounds_from_pointer_arith (struct loop *loop, gimple stmt)
{
  tree def, base, step, scev, type, low, high;
  tree var, ptr;

  /* Only POINTER_PLUS_EXPR assignments to an SSA name are of interest.  */
  if (!is_gimple_assign (stmt)
      || gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR)
    return;

  def = gimple_assign_lhs (stmt);
  if (TREE_CODE (def) != SSA_NAME)
    return;

  type = TREE_TYPE (def);
  /* Undefined overflow is the whole basis of this inference.  */
  if (!nowrap_type_p (type))
    return;

  ptr = gimple_assign_rhs1 (stmt);
  if (!expr_invariant_in_loop_p (loop, ptr))
    return;

  var = gimple_assign_rhs2 (stmt);
  if (TYPE_PRECISION (type) != TYPE_PRECISION (TREE_TYPE (var)))
    return;

  /* Extract the initial value and step of DEF with respect to LOOP.  */
  scev = instantiate_parameters (loop, analyze_scalar_evolution (loop, def));
  if (chrec_contains_undetermined (scev))
    return;

  base = initial_condition_in_loop_num (scev, loop->num);
  step = evolution_part_in_loop_num (scev, loop->num);

  if (!base || !step
      || TREE_CODE (step) != INTEGER_CST
      || tree_contains_chrecs (base, NULL)
      || chrec_contains_symbols_defined_in_loop (base, loop->num))
    return;

  low = lower_bound_in_type (type, type);
  high = upper_bound_in_type (type, type);

  /* In C, pointer arithmetic p + 1 cannot use a NULL pointer, and p - 1 cannot
     produce a NULL pointer.  The contrary would mean NULL points to an object,
     while NULL is supposed to compare unequal with the address of all objects.
     Furthermore, p + 1 cannot produce a NULL pointer and p - 1 cannot use a
     NULL pointer since that would mean wrapping, which we assume here not to
     happen.  So, we can exclude NULL from the valid range of pointer
     arithmetic.  */
  if (flag_delete_null_pointer_checks && int_cst_value (low) == 0)
    low = build_int_cstu (TREE_TYPE (low), TYPE_ALIGN_UNIT (TREE_TYPE (type)));

  record_nonwrapping_iv (loop, base, step, stmt, low, high, false, true);
}
/* Determine information about number of iterations of a LOOP from the fact
   that signed arithmetics in STMT does not overflow.  */

static void
infer_loop_bounds_from_signedness (struct loop *loop, gimple stmt)
{
  tree def, base, step, scev, type, low, high;

  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return;

  def = gimple_assign_lhs (stmt);

  if (TREE_CODE (def) != SSA_NAME)
    return;

  type = TREE_TYPE (def);
  /* Only integral types with undefined overflow let us conclude that the
     value stays inside the type's bounds on every iteration.  */
  if (!INTEGRAL_TYPE_P (type)
      || !TYPE_OVERFLOW_UNDEFINED (type))
    return;

  /* Extract the initial value and step of DEF with respect to LOOP.  */
  scev = instantiate_parameters (loop, analyze_scalar_evolution (loop, def));
  if (chrec_contains_undetermined (scev))
    return;

  base = initial_condition_in_loop_num (scev, loop->num);
  step = evolution_part_in_loop_num (scev, loop->num);

  if (!base || !step
      || TREE_CODE (step) != INTEGER_CST
      || tree_contains_chrecs (base, NULL)
      || chrec_contains_symbols_defined_in_loop (base, loop->num))
    return;

  low = lower_bound_in_type (type, type);
  high = upper_bound_in_type (type, type);

  record_nonwrapping_iv (loop, base, step, stmt, low, high, false, true);
}
  2580. /* The following analyzers are extracting informations on the bounds
  2581. of LOOP from the following undefined behaviors:
  2582. - data references should not access elements over the statically
  2583. allocated size,
  2584. - signed variables should not overflow when flag_wrapv is not set.
  2585. */
  2586. static void
  2587. infer_loop_bounds_from_undefined (struct loop *loop)
  2588. {
  2589. unsigned i;
  2590. basic_block *bbs;
  2591. gimple_stmt_iterator bsi;
  2592. basic_block bb;
  2593. bool reliable;
  2594. bbs = get_loop_body (loop);
  2595. for (i = 0; i < loop->num_nodes; i++)
  2596. {
  2597. bb = bbs[i];
  2598. /* If BB is not executed in each iteration of the loop, we cannot
  2599. use the operations in it to infer reliable upper bound on the
  2600. # of iterations of the loop. However, we can use it as a guess.
  2601. Reliable guesses come only from array bounds. */
  2602. reliable = dominated_by_p (CDI_DOMINATORS, loop->latch, bb);
  2603. for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
  2604. {
  2605. gimple stmt = gsi_stmt (bsi);
  2606. infer_loop_bounds_from_array (loop, stmt);
  2607. if (reliable)
  2608. {
  2609. infer_loop_bounds_from_signedness (loop, stmt);
  2610. infer_loop_bounds_from_pointer_arith (loop, stmt);
  2611. }
  2612. }
  2613. }
  2614. free (bbs);
  2615. }
  2616. /* Compare wide ints, callback for qsort. */
  2617. static int
  2618. wide_int_cmp (const void *p1, const void *p2)
  2619. {
  2620. const widest_int *d1 = (const widest_int *) p1;
  2621. const widest_int *d2 = (const widest_int *) p2;
  2622. return wi::cmpu (*d1, *d2);
  2623. }
  2624. /* Return index of BOUND in BOUNDS array sorted in increasing order.
  2625. Lookup by binary search. */
  2626. static int
  2627. bound_index (vec<widest_int> bounds, const widest_int &bound)
  2628. {
  2629. unsigned int end = bounds.length ();
  2630. unsigned int begin = 0;
  2631. /* Find a matching index by means of a binary search. */
  2632. while (begin != end)
  2633. {
  2634. unsigned int middle = (begin + end) / 2;
  2635. widest_int index = bounds[middle];
  2636. if (index == bound)
  2637. return middle;
  2638. else if (wi::ltu_p (index, bound))
  2639. begin = middle + 1;
  2640. else
  2641. end = middle;
  2642. }
  2643. gcc_unreachable ();
  2644. }
/* We recorded loop bounds only for statements dominating loop latch (and thus
   executed each loop iteration).  If there are any bounds on statements not
   dominating the loop latch we can improve the estimate by walking the loop
   body and seeing if every path from loop header to loop latch contains
   some bounded statement.  */

static void
discover_iteration_bound_by_body_walk (struct loop *loop)
{
  struct nb_iter_bound *elt;
  vec<widest_int> bounds = vNULL;
  vec<vec<basic_block> > queues = vNULL;
  vec<basic_block> queue = vNULL;
  ptrdiff_t queue_index;
  ptrdiff_t latch_index = 0;

  /* Discover what bounds may interest us.  */
  for (elt = loop->bounds; elt; elt = elt->next)
    {
      widest_int bound = elt->bound;

      /* Exit terminates loop at given iteration, while non-exits produce undefined
	 effect on the next iteration.  */
      if (!elt->is_exit)
	{
	  bound += 1;
	  /* If an overflow occurred, ignore the result.  */
	  if (bound == 0)
	    continue;
	}

      /* Only bounds that improve on the currently known upper bound are
	 worth considering.  */
      if (!loop->any_upper_bound
	  || wi::ltu_p (bound, loop->nb_iterations_upper_bound))
	bounds.safe_push (bound);
    }

  /* Exit early if there is nothing to do.  */
  if (!bounds.exists ())
    return;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, " Trying to walk loop body to reduce the bound.\n");

  /* Sort the bounds in increasing order, as required by the binary search
     in bound_index (wide_int_cmp compares unsigned ascending).  */
  bounds.qsort (wide_int_cmp);

  /* For every basic block record the lowest bound that is guaranteed to
     terminate the loop.  */

  hash_map<basic_block, ptrdiff_t> bb_bounds;
  for (elt = loop->bounds; elt; elt = elt->next)
    {
      widest_int bound = elt->bound;
      if (!elt->is_exit)
	{
	  bound += 1;
	  /* If an overflow occurred, ignore the result.  */
	  if (bound == 0)
	    continue;
	}

      if (!loop->any_upper_bound
	  || wi::ltu_p (bound, loop->nb_iterations_upper_bound))
	{
	  ptrdiff_t index = bound_index (bounds, bound);
	  ptrdiff_t *entry = bb_bounds.get (gimple_bb (elt->stmt));
	  if (!entry)
	    bb_bounds.put (gimple_bb (elt->stmt), index);
	  else if ((ptrdiff_t)*entry > index)
	    *entry = index;
	}
    }

  hash_map<basic_block, ptrdiff_t> block_priority;

  /* Perform shortest path discovery loop->header ... loop->latch.

     The "distance" is given by the smallest loop bound of basic block
     present in the path and we look for path with largest smallest bound
     on it.

     To avoid the need for fibonacci heap on double ints we simply compress
     double ints into indexes to BOUNDS array and then represent the queue
     as arrays of queues for every index.
     Index of BOUNDS.length() means that the execution of given BB has
     no bounds determined.

     VISITED is a pointer map translating basic block into smallest index
     it was inserted into the priority queue with.  */
  latch_index = -1;

  /* Start walk in loop header with index set to infinite bound.  */
  queue_index = bounds.length ();
  queues.safe_grow_cleared (queue_index + 1);
  queue.safe_push (loop->header);
  queues[queue_index] = queue;
  block_priority.put (loop->header, queue_index);

  for (; queue_index >= 0; queue_index--)
    {
      if (latch_index < queue_index)
	{
	  while (queues[queue_index].length ())
	    {
	      basic_block bb;
	      ptrdiff_t bound_index = queue_index;
	      edge e;
	      edge_iterator ei;

	      queue = queues[queue_index];
	      bb = queue.pop ();

	      /* OK, we later inserted the BB with lower priority, skip it.  */
	      if (*block_priority.get (bb) > queue_index)
		continue;

	      /* See if we can improve the bound.  */
	      ptrdiff_t *entry = bb_bounds.get (bb);
	      if (entry && *entry < bound_index)
		bound_index = *entry;

	      /* Insert successors into the queue, watch for latch edge
		 and record greatest index we saw.  */
	      FOR_EACH_EDGE (e, ei, bb->succs)
		{
		  bool insert = false;

		  if (loop_exit_edge_p (loop, e))
		    continue;

		  if (e == loop_latch_edge (loop)
		      && latch_index < bound_index)
		    latch_index = bound_index;
		  else if (!(entry = block_priority.get (e->dest)))
		    {
		      insert = true;
		      block_priority.put (e->dest, bound_index);
		    }
		  else if (*entry < bound_index)
		    {
		      insert = true;
		      *entry = bound_index;
		    }

		  if (insert)
		    queues[bound_index].safe_push (e->dest);
		}
	    }
	}
      queues[queue_index].release ();
    }

  gcc_assert (latch_index >= 0);
  if ((unsigned)latch_index < bounds.length ())
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Found better loop bound ");
	  print_decu (bounds[latch_index], dump_file);
	  fprintf (dump_file, "\n");
	}
      record_niter_bound (loop, bounds[latch_index], false, true);
    }

  queues.release ();
  bounds.release ();
}
/* See if every path cross the loop goes through a statement that is known
   to not execute at the last iteration.  In that case we can decrease
   iteration count by 1.  */

static void
maybe_lower_iteration_bound (struct loop *loop)
{
  hash_set<gimple> *not_executed_last_iteration = NULL;
  struct nb_iter_bound *elt;
  bool found_exit = false;
  vec<basic_block> queue = vNULL;
  bitmap visited;

  /* Collect all statements with interesting (i.e. lower than
     nb_iterations_upper_bound) bound on them.

     TODO: Due to the way record_estimate choose estimates to store, the bounds
     will be always nb_iterations_upper_bound-1.  We can change this to record
     also statements not dominating the loop latch and update the walk below
     to the shortest path algorithm.  */
  for (elt = loop->bounds; elt; elt = elt->next)
    {
      if (!elt->is_exit
	  && wi::ltu_p (elt->bound, loop->nb_iterations_upper_bound))
	{
	  if (!not_executed_last_iteration)
	    not_executed_last_iteration = new hash_set<gimple>;
	  not_executed_last_iteration->add (elt->stmt);
	}
    }
  if (!not_executed_last_iteration)
    return;

  /* Start DFS walk in the loop header and see if we can reach the
     loop latch or any of the exits (including statements with side
     effects that may terminate the loop otherwise) without visiting
     any of the statements known to have undefined effect on the last
     iteration.  */
  queue.safe_push (loop->header);
  visited = BITMAP_ALLOC (NULL);
  bitmap_set_bit (visited, loop->header->index);
  found_exit = false;

  do
    {
      basic_block bb = queue.pop ();
      gimple_stmt_iterator gsi;
      bool stmt_found = false;

      /* Loop for possible exits and statements bounding the execution.  */
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple stmt = gsi_stmt (gsi);
	  if (not_executed_last_iteration->contains (stmt))
	    {
	      stmt_found = true;
	      break;
	    }
	  /* A statement with side effects may terminate the loop on its
	     own, so treat it like an exit.  */
	  if (gimple_has_side_effects (stmt))
	    {
	      found_exit = true;
	      break;
	    }
	}
      if (found_exit)
	break;

      /* If no bounding statement is found, continue the walk.  */
      if (!stmt_found)
	{
	  edge e;
	  edge_iterator ei;

	  FOR_EACH_EDGE (e, ei, bb->succs)
	    {
	      if (loop_exit_edge_p (loop, e)
		  || e == loop_latch_edge (loop))
		{
		  found_exit = true;
		  break;
		}
	      /* bitmap_set_bit returns true only for newly set bits,
		 so each block is queued at most once.  */
	      if (bitmap_set_bit (visited, e->dest->index))
		queue.safe_push (e->dest);
	    }
	}
    }
  while (queue.length () && !found_exit);

  /* If every path through the loop reach bounding statement before exit,
     then we know the last iteration of the loop will have undefined effect
     and we can decrease number of iterations.  */

  if (!found_exit)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Reducing loop iteration estimate by 1; "
		 "undefined statement must be executed at the last iteration.\n");
      record_niter_bound (loop, loop->nb_iterations_upper_bound - 1,
			  false, true);
    }

  BITMAP_FREE (visited);
  queue.release ();
  delete not_executed_last_iteration;
}
/* Records estimates on numbers of iterations of LOOP.  If USE_UNDEFINED_P
   is true also use estimates derived from undefined behavior.  */

static void
estimate_numbers_of_iterations_loop (struct loop *loop)
{
  vec<edge> exits;
  tree niter, type;
  unsigned i;
  struct tree_niter_desc niter_desc;
  edge ex;
  widest_int bound;
  edge likely_exit;

  /* Give up if we already have tried to compute an estimation.  */
  if (loop->estimate_state != EST_NOT_COMPUTED)
    return;

  loop->estimate_state = EST_AVAILABLE;
  /* Force estimate computation but leave any existing upper bound in place.  */
  loop->any_estimate = false;

  /* Ensure that loop->nb_iterations is computed if possible.  If it turns out
     to be constant, we avoid undefined behavior implied bounds and instead
     diagnose those loops with -Waggressive-loop-optimizations.  */
  number_of_latch_executions (loop);

  exits = get_loop_exit_edges (loop);
  likely_exit = single_likely_exit (loop);
  FOR_EACH_VEC_ELT (exits, i, ex)
    {
      if (!number_of_iterations_exit (loop, ex, &niter_desc, false, false))
	continue;

      niter = niter_desc.niter;
      type = TREE_TYPE (niter);
      /* If the exit may be taken before any iteration completes, guard the
	 count with that condition.  */
      if (TREE_CODE (niter_desc.may_be_zero) != INTEGER_CST)
	niter = build3 (COND_EXPR, type, niter_desc.may_be_zero,
			build_int_cst (type, 0),
			niter);
      record_estimate (loop, niter, niter_desc.max,
		       last_stmt (ex->src),
		       true, ex == likely_exit, true);
    }
  exits.release ();

  if (flag_aggressive_loop_optimizations)
    infer_loop_bounds_from_undefined (loop);

  discover_iteration_bound_by_body_walk (loop);

  maybe_lower_iteration_bound (loop);

  /* If we have a measured profile, use it to estimate the number of
     iterations.  */
  if (loop->header->count != 0)
    {
      gcov_type nit = expected_loop_iterations_unbounded (loop) + 1;
      bound = gcov_type_to_wide_int (nit);
      record_niter_bound (loop, bound, true, false);
    }

  /* If we know the exact number of iterations of this loop, try to
     not break code with undefined behavior by not recording smaller
     maximum number of iterations.  */
  if (loop->nb_iterations
      && TREE_CODE (loop->nb_iterations) == INTEGER_CST)
    {
      loop->any_upper_bound = true;
      loop->nb_iterations_upper_bound = wi::to_widest (loop->nb_iterations);
    }
}
  2941. /* Sets NIT to the estimated number of executions of the latch of the
  2942. LOOP. If CONSERVATIVE is true, we must be sure that NIT is at least as
  2943. large as the number of iterations. If we have no reliable estimate,
  2944. the function returns false, otherwise returns true. */
  2945. bool
  2946. estimated_loop_iterations (struct loop *loop, widest_int *nit)
  2947. {
  2948. /* When SCEV information is available, try to update loop iterations
  2949. estimate. Otherwise just return whatever we recorded earlier. */
  2950. if (scev_initialized_p ())
  2951. estimate_numbers_of_iterations_loop (loop);
  2952. return (get_estimated_loop_iterations (loop, nit));
  2953. }
  2954. /* Similar to estimated_loop_iterations, but returns the estimate only
  2955. if it fits to HOST_WIDE_INT. If this is not the case, or the estimate
  2956. on the number of iterations of LOOP could not be derived, returns -1. */
  2957. HOST_WIDE_INT
  2958. estimated_loop_iterations_int (struct loop *loop)
  2959. {
  2960. widest_int nit;
  2961. HOST_WIDE_INT hwi_nit;
  2962. if (!estimated_loop_iterations (loop, &nit))
  2963. return -1;
  2964. if (!wi::fits_shwi_p (nit))
  2965. return -1;
  2966. hwi_nit = nit.to_shwi ();
  2967. return hwi_nit < 0 ? -1 : hwi_nit;
  2968. }
  2969. /* Sets NIT to an upper bound for the maximum number of executions of the
  2970. latch of the LOOP. If we have no reliable estimate, the function returns
  2971. false, otherwise returns true. */
  2972. bool
  2973. max_loop_iterations (struct loop *loop, widest_int *nit)
  2974. {
  2975. /* When SCEV information is available, try to update loop iterations
  2976. estimate. Otherwise just return whatever we recorded earlier. */
  2977. if (scev_initialized_p ())
  2978. estimate_numbers_of_iterations_loop (loop);
  2979. return get_max_loop_iterations (loop, nit);
  2980. }
  2981. /* Similar to max_loop_iterations, but returns the estimate only
  2982. if it fits to HOST_WIDE_INT. If this is not the case, or the estimate
  2983. on the number of iterations of LOOP could not be derived, returns -1. */
  2984. HOST_WIDE_INT
  2985. max_loop_iterations_int (struct loop *loop)
  2986. {
  2987. widest_int nit;
  2988. HOST_WIDE_INT hwi_nit;
  2989. if (!max_loop_iterations (loop, &nit))
  2990. return -1;
  2991. if (!wi::fits_shwi_p (nit))
  2992. return -1;
  2993. hwi_nit = nit.to_shwi ();
  2994. return hwi_nit < 0 ? -1 : hwi_nit;
  2995. }
  2996. /* Returns an estimate for the number of executions of statements
  2997. in the LOOP. For statements before the loop exit, this exceeds
  2998. the number of execution of the latch by one. */
  2999. HOST_WIDE_INT
  3000. estimated_stmt_executions_int (struct loop *loop)
  3001. {
  3002. HOST_WIDE_INT nit = estimated_loop_iterations_int (loop);
  3003. HOST_WIDE_INT snit;
  3004. if (nit == -1)
  3005. return -1;
  3006. snit = (HOST_WIDE_INT) ((unsigned HOST_WIDE_INT) nit + 1);
  3007. /* If the computation overflows, return -1. */
  3008. return snit < 0 ? -1 : snit;
  3009. }
  3010. /* Sets NIT to the estimated maximum number of executions of the latch of the
  3011. LOOP, plus one. If we have no reliable estimate, the function returns
  3012. false, otherwise returns true. */
  3013. bool
  3014. max_stmt_executions (struct loop *loop, widest_int *nit)
  3015. {
  3016. widest_int nit_minus_one;
  3017. if (!max_loop_iterations (loop, nit))
  3018. return false;
  3019. nit_minus_one = *nit;
  3020. *nit += 1;
  3021. return wi::gtu_p (*nit, nit_minus_one);
  3022. }
  3023. /* Sets NIT to the estimated number of executions of the latch of the
  3024. LOOP, plus one. If we have no reliable estimate, the function returns
  3025. false, otherwise returns true. */
  3026. bool
  3027. estimated_stmt_executions (struct loop *loop, widest_int *nit)
  3028. {
  3029. widest_int nit_minus_one;
  3030. if (!estimated_loop_iterations (loop, nit))
  3031. return false;
  3032. nit_minus_one = *nit;
  3033. *nit += 1;
  3034. return wi::gtu_p (*nit, nit_minus_one);
  3035. }
  3036. /* Records estimates on numbers of iterations of loops. */
  3037. void
  3038. estimate_numbers_of_iterations (void)
  3039. {
  3040. struct loop *loop;
  3041. /* We don't want to issue signed overflow warnings while getting
  3042. loop iteration estimates. */
  3043. fold_defer_overflow_warnings ();
  3044. FOR_EACH_LOOP (loop, 0)
  3045. {
  3046. estimate_numbers_of_iterations_loop (loop);
  3047. }
  3048. fold_undefer_and_ignore_overflow_warnings ();
  3049. }
  3050. /* Returns true if statement S1 dominates statement S2. */
  3051. bool
  3052. stmt_dominates_stmt_p (gimple s1, gimple s2)
  3053. {
  3054. basic_block bb1 = gimple_bb (s1), bb2 = gimple_bb (s2);
  3055. if (!bb1
  3056. || s1 == s2)
  3057. return true;
  3058. if (bb1 == bb2)
  3059. {
  3060. gimple_stmt_iterator bsi;
  3061. if (gimple_code (s2) == GIMPLE_PHI)
  3062. return false;
  3063. if (gimple_code (s1) == GIMPLE_PHI)
  3064. return true;
  3065. for (bsi = gsi_start_bb (bb1); gsi_stmt (bsi) != s2; gsi_next (&bsi))
  3066. if (gsi_stmt (bsi) == s1)
  3067. return true;
  3068. return false;
  3069. }
  3070. return dominated_by_p (CDI_DOMINATORS, bb2, bb1);
  3071. }
/* Returns true when we can prove that the number of executions of
   STMT in the loop is at most NITER, according to the bound on
   the number of executions of the statement NITER_BOUND->stmt recorded in
   NITER_BOUND and fact that NITER_BOUND->stmt dominate STMT.

   ??? This code can become quite a CPU hog - we can have many bounds,
   and large basic block forcing stmt_dominates_stmt_p to be queried
   many times on a large basic blocks, so the whole thing is O(n^2)
   for scev_probably_wraps_p invocation (that can be done n times).

   It would make more sense (and give better answers) to remember BB
   bounds computed by discover_iteration_bound_by_body_walk.  */

static bool
n_of_executions_at_most (gimple stmt,
			 struct nb_iter_bound *niter_bound,
			 tree niter)
{
  widest_int bound = niter_bound->bound;
  tree nit_type = TREE_TYPE (niter), e;
  enum tree_code cmp;

  gcc_assert (TYPE_UNSIGNED (nit_type));

  /* If the bound does not even fit into NIT_TYPE, it cannot tell us that
     the number of iterations is small.  */
  if (!wi::fits_to_tree_p (bound, nit_type))
    return false;

  /* We know that NITER_BOUND->stmt is executed at most NITER_BOUND->bound + 1
     times.  This means that:

     -- if NITER_BOUND->is_exit is true, then everything after
	it at most NITER_BOUND->bound times.

     -- If NITER_BOUND->is_exit is false, then if we can prove that when STMT
	is executed, then NITER_BOUND->stmt is executed as well in the same
	iteration then STMT is executed at most NITER_BOUND->bound + 1 times.

	If we can determine that NITER_BOUND->stmt is always executed
	after STMT, then STMT is executed at most NITER_BOUND->bound + 2 times.
	We conclude that if both statements belong to the same
	basic block and STMT is before NITER_BOUND->stmt and there are no
	statements with side effects in between.  */

  if (niter_bound->is_exit)
    {
      /* An exit bound only helps for statements strictly dominated by the
	 exit test; comparing with GE_EXPR accounts for the extra
	 execution of the exit statement itself.  */
      if (stmt == niter_bound->stmt
	  || !stmt_dominates_stmt_p (niter_bound->stmt, stmt))
	return false;
      cmp = GE_EXPR;
    }
  else
    {
      if (!stmt_dominates_stmt_p (niter_bound->stmt, stmt))
	{
	  gimple_stmt_iterator bsi;
	  if (gimple_bb (stmt) != gimple_bb (niter_bound->stmt)
	      || gimple_code (stmt) == GIMPLE_PHI
	      || gimple_code (niter_bound->stmt) == GIMPLE_PHI)
	    return false;

	  /* By stmt_dominates_stmt_p we already know that STMT appears
	     before NITER_BOUND->STMT.  Still need to test that the loop
	     can not be terminated by a side effect in between.  */
	  for (bsi = gsi_for_stmt (stmt); gsi_stmt (bsi) != niter_bound->stmt;
	       gsi_next (&bsi))
	    if (gimple_has_side_effects (gsi_stmt (bsi)))
	       return false;

	  /* STMT may run one extra time relative to NITER_BOUND->stmt, so
	     raise the bound by one; bail out if that wraps or no longer
	     fits the comparison type.  */
	  bound += 1;
	  if (bound == 0
	      || !wi::fits_to_tree_p (bound, nit_type))
	    return false;
	}
      cmp = GT_EXPR;
    }

  e = fold_binary (cmp, boolean_type_node,
		   niter, wide_int_to_tree (nit_type, bound));
  return e && integer_nonzerop (e);
}
  3141. /* Returns true if the arithmetics in TYPE can be assumed not to wrap. */
  3142. bool
  3143. nowrap_type_p (tree type)
  3144. {
  3145. if (INTEGRAL_TYPE_P (type)
  3146. && TYPE_OVERFLOW_UNDEFINED (type))
  3147. return true;
  3148. if (POINTER_TYPE_P (type))
  3149. return true;
  3150. return false;
  3151. }
/* Return false only when the induction variable BASE + STEP * I is
   known to not overflow: i.e. when the number of iterations is small
   enough with respect to the step and initial condition in order to
   keep the evolution confined in TYPEs bounds.  Return true when the
   iv is known to overflow or when the property is not computable.

   USE_OVERFLOW_SEMANTICS is true if this function should assume that
   the rules for overflow of the given language apply (e.g., that signed
   arithmetics in C does not overflow).  */

bool
scev_probably_wraps_p (tree base, tree step,
		       gimple at_stmt, struct loop *loop,
		       bool use_overflow_semantics)
{
  tree delta, step_abs;
  tree unsigned_type, valid_niter;
  tree type = TREE_TYPE (step);
  tree e;
  widest_int niter;
  struct nb_iter_bound *bound;

  /* FIXME: We really need something like
     http://gcc.gnu.org/ml/gcc-patches/2005-06/msg02025.html.

     We used to test for the following situation that frequently appears
     during address arithmetics:

       D.1621_13 = (long unsigned intD.4) D.1620_12;
       D.1622_14 = D.1621_13 * 8;
       D.1623_15 = (doubleD.29 *) D.1622_14;

     And derived that the sequence corresponding to D_14
     can be proved to not wrap because it is used for computing a
     memory access; however, this is not really the case -- for example,
     if D_12 = (unsigned char) [254,+,1], then D_14 has values
     2032, 2040, 0, 8, ..., but the code is still legal.  */

  if (chrec_contains_undetermined (base)
      || chrec_contains_undetermined (step))
    return true;

  /* A constant iv cannot wrap.  */
  if (integer_zerop (step))
    return false;

  /* If we can use the fact that signed and pointer arithmetics does not
     wrap, we are done.  */
  if (use_overflow_semantics && nowrap_type_p (TREE_TYPE (base)))
    return false;

  /* To be able to use estimates on number of iterations of the loop,
     we must have an upper bound on the absolute value of the step.  */
  if (TREE_CODE (step) != INTEGER_CST)
    return true;

  /* Don't issue signed overflow warnings.  */
  fold_defer_overflow_warnings ();

  /* Otherwise, compute the number of iterations before we reach the
     bound of the type, and verify that the loop is exited before this
     occurs.  */
  unsigned_type = unsigned_type_for (type);
  base = fold_convert (unsigned_type, base);

  if (tree_int_cst_sign_bit (step))
    {
      /* Negative step: distance from BASE down to the type's minimum,
	 with the step negated to get its magnitude.  */
      tree extreme = fold_convert (unsigned_type,
				   lower_bound_in_type (type, type));
      delta = fold_build2 (MINUS_EXPR, unsigned_type, base, extreme);
      step_abs = fold_build1 (NEGATE_EXPR, unsigned_type,
			      fold_convert (unsigned_type, step));
    }
  else
    {
      /* Positive step: distance from BASE up to the type's maximum.  */
      tree extreme = fold_convert (unsigned_type,
				   upper_bound_in_type (type, type));
      delta = fold_build2 (MINUS_EXPR, unsigned_type, extreme, base);
      step_abs = fold_convert (unsigned_type, step);
    }

  /* VALID_NITER is the number of steps that fit before hitting the
     type's bound.  */
  valid_niter = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step_abs);

  estimate_numbers_of_iterations_loop (loop);

  /* First try the loop-wide maximum iteration bound.  */
  if (max_loop_iterations (loop, &niter)
      && wi::fits_to_tree_p (niter, TREE_TYPE (valid_niter))
      && (e = fold_binary (GT_EXPR, boolean_type_node, valid_niter,
			   wide_int_to_tree (TREE_TYPE (valid_niter),
					     niter))) != NULL
      && integer_nonzerop (e))
    {
      fold_undefer_and_ignore_overflow_warnings ();
      return false;
    }

  /* Otherwise try the per-statement execution bounds recorded on the
     loop.  */
  if (at_stmt)
    for (bound = loop->bounds; bound; bound = bound->next)
      {
	if (n_of_executions_at_most (at_stmt, bound, valid_niter))
	  {
	    fold_undefer_and_ignore_overflow_warnings ();
	    return false;
	  }
      }

  fold_undefer_and_ignore_overflow_warnings ();

  /* At this point we still don't have a proof that the iv does not
     overflow: give up.  */
  return true;
}
  3244. /* Frees the information on upper bounds on numbers of iterations of LOOP. */
  3245. void
  3246. free_numbers_of_iterations_estimates_loop (struct loop *loop)
  3247. {
  3248. struct nb_iter_bound *bound, *next;
  3249. loop->nb_iterations = NULL;
  3250. loop->estimate_state = EST_NOT_COMPUTED;
  3251. for (bound = loop->bounds; bound; bound = next)
  3252. {
  3253. next = bound->next;
  3254. ggc_free (bound);
  3255. }
  3256. loop->bounds = NULL;
  3257. }
  3258. /* Frees the information on upper bounds on numbers of iterations of loops. */
  3259. void
  3260. free_numbers_of_iterations_estimates (void)
  3261. {
  3262. struct loop *loop;
  3263. FOR_EACH_LOOP (loop, 0)
  3264. {
  3265. free_numbers_of_iterations_estimates_loop (loop);
  3266. }
  3267. }
  3268. /* Substitute value VAL for ssa name NAME inside expressions held
  3269. at LOOP. */
  3270. void
  3271. substitute_in_loop_info (struct loop *loop, tree name, tree val)
  3272. {
  3273. loop->nb_iterations = simplify_replace_tree (loop->nb_iterations, name, val);
  3274. }