tree-ssa-math-opts.c

  1. /* Global, SSA-based optimizations using mathematical identities.
  2. Copyright (C) 2005-2015 Free Software Foundation, Inc.
  3. This file is part of GCC.
  4. GCC is free software; you can redistribute it and/or modify it
  5. under the terms of the GNU General Public License as published by the
  6. Free Software Foundation; either version 3, or (at your option) any
  7. later version.
  8. GCC is distributed in the hope that it will be useful, but WITHOUT
  9. ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10. FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
  11. for more details.
  12. You should have received a copy of the GNU General Public License
  13. along with GCC; see the file COPYING3. If not see
  14. <http://www.gnu.org/licenses/>. */
  15. /* Currently, the only mini-pass in this file tries to CSE reciprocal
  16. operations. These are common in sequences such as this one:
  17. modulus = sqrt(x*x + y*y + z*z);
  18. x = x / modulus;
  19. y = y / modulus;
  20. z = z / modulus;
  21. that can be optimized to
  22. modulus = sqrt(x*x + y*y + z*z);
  23. rmodulus = 1.0 / modulus;
  24. x = x * rmodulus;
  25. y = y * rmodulus;
  26. z = z * rmodulus;
  27. We do this for loop invariant divisors, and with this pass whenever
  28. we notice that a division has the same divisor multiple times.
  29. Of course, like in PRE, we don't insert a division if a dominator
  30. already has one. However, this cannot be done as an extension of
  31. PRE for several reasons.
  32. First of all, with some experiments it was found out that the
  33. transformation is not always useful if there are only two divisions
  34. by the same divisor. This is probably because modern processors
  35. can pipeline the divisions; on older, in-order processors it should
  36. still be effective to optimize two divisions by the same number.
  37. We make this a param, and it shall be called N in the remainder of
  38. this comment.
  39. Second, if trapping math is active, we have less freedom on where
  40. to insert divisions: we can only do so in basic blocks that already
  41. contain one. (If divisions don't trap, we can instead insert
  42. divisions elsewhere, which will be in blocks that are common dominators
  43. of those that have the division).
  44. We really don't want to compute the reciprocal unless a division will
  45. be found. To do this, we won't insert the division in a basic block
  46. that has less than N divisions *post-dominating* it.
  47. The algorithm constructs a subset of the dominator tree, holding the
  48. blocks containing the divisions and the common dominators to them,
  49. and walks it twice. The first walk is in post-order, and it annotates
  50. each block with the number of divisions that post-dominate it: this
  51. gives information on where divisions can be inserted profitably.
  52. The second walk is in pre-order, and it inserts divisions as explained
  53. above, and replaces divisions by multiplications.
  54. In the best case, the cost of the pass is O(n_statements). In the
  55. worst-case, the cost is due to creating the dominator tree subset,
  56. with a cost of O(n_basic_blocks ^ 2); however this can only happen
  57. for n_statements / n_basic_blocks statements. So, the amortized cost
  58. of creating the dominator tree subset is O(n_basic_blocks) and the
  59. worst-case cost of the pass is O(n_statements * n_basic_blocks).
  60. More practically, the cost will be small because there are few
  61. divisions, and they tend to be in the same basic block, so insert_bb
  62. is called very few times.
  63. If we did this using domwalk.c, an efficient implementation would have
  64. to work on all the variables in a single pass, because we could not
  65. work on just a subset of the dominator tree, as we do now, and the
  66. cost would also be something like O(n_statements * n_basic_blocks).
  67. The data structures would be more complex in order to work on all the
  68. variables in a single pass. */
  69. #include "config.h"
  70. #include "system.h"
  71. #include "coretypes.h"
  72. #include "tm.h"
  73. #include "flags.h"
  74. #include "hash-set.h"
  75. #include "machmode.h"
  76. #include "vec.h"
  77. #include "double-int.h"
  78. #include "input.h"
  79. #include "alias.h"
  80. #include "symtab.h"
  81. #include "wide-int.h"
  82. #include "inchash.h"
  83. #include "tree.h"
  84. #include "fold-const.h"
  85. #include "predict.h"
  86. #include "hard-reg-set.h"
  87. #include "function.h"
  88. #include "dominance.h"
  89. #include "cfg.h"
  90. #include "basic-block.h"
  91. #include "tree-ssa-alias.h"
  92. #include "internal-fn.h"
  93. #include "gimple-fold.h"
  94. #include "gimple-expr.h"
  95. #include "is-a.h"
  96. #include "gimple.h"
  97. #include "gimple-iterator.h"
  98. #include "gimplify.h"
  99. #include "gimplify-me.h"
  100. #include "stor-layout.h"
  101. #include "gimple-ssa.h"
  102. #include "tree-cfg.h"
  103. #include "tree-phinodes.h"
  104. #include "ssa-iterators.h"
  105. #include "stringpool.h"
  106. #include "tree-ssanames.h"
  107. #include "hashtab.h"
  108. #include "rtl.h"
  109. #include "statistics.h"
  110. #include "real.h"
  111. #include "fixed-value.h"
  112. #include "insn-config.h"
  113. #include "expmed.h"
  114. #include "dojump.h"
  115. #include "explow.h"
  116. #include "calls.h"
  117. #include "emit-rtl.h"
  118. #include "varasm.h"
  119. #include "stmt.h"
  120. #include "expr.h"
  121. #include "tree-dfa.h"
  122. #include "tree-ssa.h"
  123. #include "tree-pass.h"
  124. #include "alloc-pool.h"
  125. #include "target.h"
  126. #include "gimple-pretty-print.h"
  127. #include "builtins.h"
  128. /* FIXME: RTL headers have to be included here for optabs. */
  129. #include "rtl.h" /* Because optabs.h wants enum rtx_code. */
  130. #include "expr.h" /* Because optabs.h wants sepops. */
  131. #include "insn-codes.h"
  132. #include "optabs.h"
  133. /* This structure represents one basic block that either computes a
  134. division, or is a common dominator for basic blocks that compute a
  135. division. */
  136. struct occurrence {
  137. /* The basic block represented by this structure. */
  138. basic_block bb;
  139. /* If non-NULL, the SSA_NAME holding the definition for a reciprocal
  140. inserted in BB. */
  141. tree recip_def;
  142. /* If non-NULL, the GIMPLE_ASSIGN for a reciprocal computation that
  143. was inserted in BB. */
  144. gimple recip_def_stmt;
  145. /* Pointer to a list of "struct occurrence"s for blocks dominated
  146. by BB. */
  147. struct occurrence *children;
  148. /* Pointer to the next "struct occurrence" in the list of blocks
  149. sharing a common dominator. */
  150. struct occurrence *next;
  151. /* The number of divisions that are in BB before compute_merit. The
  152. number of divisions that are in BB or post-dominate it after
  153. compute_merit. */
  154. int num_divisions;
  155. /* True if the basic block has a division, false if it is a common
  156. dominator for basic blocks that do. If it is false and trapping
  157. math is active, BB is not a candidate for inserting a reciprocal. */
  158. bool bb_has_division;
  159. };
  160. static struct
  161. {
  162. /* Number of 1.0/X ops inserted. */
  163. int rdivs_inserted;
  164. /* Number of 1.0/FUNC ops inserted. */
  165. int rfuncs_inserted;
  166. } reciprocal_stats;
  167. static struct
  168. {
  169. /* Number of cexpi calls inserted. */
  170. int inserted;
  171. } sincos_stats;
  172. static struct
  173. {
  174. /* Number of hand-written 16-bit nop / bswaps found. */
  175. int found_16bit;
  176. /* Number of hand-written 32-bit nop / bswaps found. */
  177. int found_32bit;
  178. /* Number of hand-written 64-bit nop / bswaps found. */
  179. int found_64bit;
  180. } nop_stats, bswap_stats;
  181. static struct
  182. {
  183. /* Number of widening multiplication ops inserted. */
  184. int widen_mults_inserted;
  185. /* Number of integer multiply-and-accumulate ops inserted. */
  186. int maccs_inserted;
  187. /* Number of fp fused multiply-add ops inserted. */
  188. int fmas_inserted;
  189. } widen_mul_stats;
  190. /* The instance of "struct occurrence" representing the highest
  191. interesting block in the dominator tree. */
  192. static struct occurrence *occ_head;
  193. /* Allocation pool for getting instances of "struct occurrence". */
  194. static alloc_pool occ_pool;
  195. /* Allocate and return a new struct occurrence for basic block BB,
  196. whose children list is headed by CHILDREN. */
  197. static struct occurrence *
  198. occ_new (basic_block bb, struct occurrence *children)
  199. {
  200. struct occurrence *occ;
  201. bb->aux = occ = (struct occurrence *) pool_alloc (occ_pool);
  202. memset (occ, 0, sizeof (struct occurrence));
  203. occ->bb = bb;
  204. occ->children = children;
  205. return occ;
  206. }
  207. /* Insert NEW_OCC into our subset of the dominator tree. P_HEAD points to a
  208. list of "struct occurrence"s, one per basic block, having IDOM as
  209. their common dominator.
  210. We try to insert NEW_OCC as deep as possible in the tree, and we also
  211. insert any other block that is a common dominator for BB and one
  212. block already in the tree. */
  213. static void
  214. insert_bb (struct occurrence *new_occ, basic_block idom,
  215. struct occurrence **p_head)
  216. {
  217. struct occurrence *occ, **p_occ;
  218. for (p_occ = p_head; (occ = *p_occ) != NULL; )
  219. {
  220. basic_block bb = new_occ->bb, occ_bb = occ->bb;
  221. basic_block dom = nearest_common_dominator (CDI_DOMINATORS, occ_bb, bb);
  222. if (dom == bb)
  223. {
  224. /* BB dominates OCC_BB. OCC becomes NEW_OCC's child: remove OCC
  225. from its list. */
  226. *p_occ = occ->next;
  227. occ->next = new_occ->children;
  228. new_occ->children = occ;
  229. /* Try the next block (it may as well be dominated by BB). */
  230. }
  231. else if (dom == occ_bb)
  232. {
  233. /* OCC_BB dominates BB. Tail recurse to look deeper. */
  234. insert_bb (new_occ, dom, &occ->children);
  235. return;
  236. }
  237. else if (dom != idom)
  238. {
  239. gcc_assert (!dom->aux);
  240. /* There is a dominator between IDOM and BB, add it and make
  241. two children out of NEW_OCC and OCC. First, remove OCC from
  242. its list. */
  243. *p_occ = occ->next;
  244. new_occ->next = occ;
  245. occ->next = NULL;
  246. /* None of the previous blocks has DOM as a dominator: if we tail
  247. recursed, we would reexamine them uselessly. Just switch BB with
  248. DOM, and go on looking for blocks dominated by DOM. */
  249. new_occ = occ_new (dom, new_occ);
  250. }
  251. else
  252. {
  253. /* Nothing special, go on with the next element. */
  254. p_occ = &occ->next;
  255. }
  256. }
  257. /* No place was found as a child of IDOM. Make BB a sibling of IDOM. */
  258. new_occ->next = *p_head;
  259. *p_head = new_occ;
  260. }
  261. /* Register that we found a division in BB. */
  262. static inline void
  263. register_division_in (basic_block bb)
  264. {
  265. struct occurrence *occ;
  266. occ = (struct occurrence *) bb->aux;
  267. if (!occ)
  268. {
  269. occ = occ_new (bb, NULL);
  270. insert_bb (occ, ENTRY_BLOCK_PTR_FOR_FN (cfun), &occ_head);
  271. }
  272. occ->bb_has_division = true;
  273. occ->num_divisions++;
  274. }
  275. /* Compute the number of divisions that postdominate each block in OCC and
  276. its children. */
  277. static void
  278. compute_merit (struct occurrence *occ)
  279. {
  280. struct occurrence *occ_child;
  281. basic_block dom = occ->bb;
  282. for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
  283. {
  284. basic_block bb;
  285. if (occ_child->children)
  286. compute_merit (occ_child);
  287. if (flag_exceptions)
  288. bb = single_noncomplex_succ (dom);
  289. else
  290. bb = dom;
  291. if (dominated_by_p (CDI_POST_DOMINATORS, bb, occ_child->bb))
  292. occ->num_divisions += occ_child->num_divisions;
  293. }
  294. }
  295. /* Return whether USE_STMT is a floating-point division by DEF. */
  296. static inline bool
  297. is_division_by (gimple use_stmt, tree def)
  298. {
  299. return is_gimple_assign (use_stmt)
  300. && gimple_assign_rhs_code (use_stmt) == RDIV_EXPR
  301. && gimple_assign_rhs2 (use_stmt) == def
  302. /* Do not recognize x / x as valid division, as we are getting
  303. confused later by replacing all immediate uses of x in such
  304. a stmt. */
  305. && gimple_assign_rhs1 (use_stmt) != def;
  306. }
  307. /* Walk the subset of the dominator tree rooted at OCC, setting the
  308. RECIP_DEF field to a definition of 1.0 / DEF that can be used in
  309. the given basic block. The field may be left NULL, of course,
  310. if it is not possible or profitable to do the optimization.
  311. DEF_GSI is an iterator pointing at the statement defining DEF.
  312. If RECIP_DEF is set, a dominator already has a computation that can
  313. be used. */
  314. static void
  315. insert_reciprocals (gimple_stmt_iterator *def_gsi, struct occurrence *occ,
  316. tree def, tree recip_def, int threshold)
  317. {
  318. tree type;
  319. gassign *new_stmt;
  320. gimple_stmt_iterator gsi;
  321. struct occurrence *occ_child;
  322. if (!recip_def
  323. && (occ->bb_has_division || !flag_trapping_math)
  324. && occ->num_divisions >= threshold)
  325. {
  326. /* Make a variable with the replacement and substitute it. */
  327. type = TREE_TYPE (def);
  328. recip_def = create_tmp_reg (type, "reciptmp");
  329. new_stmt = gimple_build_assign (recip_def, RDIV_EXPR,
  330. build_one_cst (type), def);
  331. if (occ->bb_has_division)
  332. {
  333. /* Case 1: insert before an existing division. */
  334. gsi = gsi_after_labels (occ->bb);
  335. while (!gsi_end_p (gsi) && !is_division_by (gsi_stmt (gsi), def))
  336. gsi_next (&gsi);
  337. gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
  338. }
  339. else if (def_gsi && occ->bb == def_gsi->bb)
  340. {
  341. /* Case 2: insert right after the definition. Note that this will
  342. never happen if the definition statement can throw, because in
  343. that case the sole successor of the statement's basic block will
  344. dominate all the uses as well. */
  345. gsi_insert_after (def_gsi, new_stmt, GSI_NEW_STMT);
  346. }
  347. else
  348. {
  349. /* Case 3: insert in a basic block not containing defs/uses. */
  350. gsi = gsi_after_labels (occ->bb);
  351. gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
  352. }
  353. reciprocal_stats.rdivs_inserted++;
  354. occ->recip_def_stmt = new_stmt;
  355. }
  356. occ->recip_def = recip_def;
  357. for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
  358. insert_reciprocals (def_gsi, occ_child, def, recip_def, threshold);
  359. }
  360. /* Replace the division at USE_P with a multiplication by the reciprocal, if
  361. possible. */
  362. static inline void
  363. replace_reciprocal (use_operand_p use_p)
  364. {
  365. gimple use_stmt = USE_STMT (use_p);
  366. basic_block bb = gimple_bb (use_stmt);
  367. struct occurrence *occ = (struct occurrence *) bb->aux;
  368. if (optimize_bb_for_speed_p (bb)
  369. && occ->recip_def && use_stmt != occ->recip_def_stmt)
  370. {
  371. gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
  372. gimple_assign_set_rhs_code (use_stmt, MULT_EXPR);
  373. SET_USE (use_p, occ->recip_def);
  374. fold_stmt_inplace (&gsi);
  375. update_stmt (use_stmt);
  376. }
  377. }
  378. /* Free OCC and return one more "struct occurrence" to be freed. */
  379. static struct occurrence *
  380. free_bb (struct occurrence *occ)
  381. {
  382. struct occurrence *child, *next;
  383. /* First get the two pointers hanging off OCC. */
  384. next = occ->next;
  385. child = occ->children;
  386. occ->bb->aux = NULL;
  387. pool_free (occ_pool, occ);
  388. /* Now ensure that we don't recurse unless it is necessary. */
  389. if (!child)
  390. return next;
  391. else
  392. {
  393. while (next)
  394. next = free_bb (next);
  395. return child;
  396. }
  397. }
  398. /* Look for floating-point divisions among DEF's uses, and try to
  399. replace them by multiplications with the reciprocal. Add
  400. as many statements computing the reciprocal as needed.
  401. DEF must be a GIMPLE register of a floating-point type. */
  402. static void
  403. execute_cse_reciprocals_1 (gimple_stmt_iterator *def_gsi, tree def)
  404. {
  405. use_operand_p use_p;
  406. imm_use_iterator use_iter;
  407. struct occurrence *occ;
  408. int count = 0, threshold;
  409. gcc_assert (FLOAT_TYPE_P (TREE_TYPE (def)) && is_gimple_reg (def));
  410. FOR_EACH_IMM_USE_FAST (use_p, use_iter, def)
  411. {
  412. gimple use_stmt = USE_STMT (use_p);
  413. if (is_division_by (use_stmt, def))
  414. {
  415. register_division_in (gimple_bb (use_stmt));
  416. count++;
  417. }
  418. }
  419. /* Do the expensive part only if we can hope to optimize something. */
  420. threshold = targetm.min_divisions_for_recip_mul (TYPE_MODE (TREE_TYPE (def)));
  421. if (count >= threshold)
  422. {
  423. gimple use_stmt;
  424. for (occ = occ_head; occ; occ = occ->next)
  425. {
  426. compute_merit (occ);
  427. insert_reciprocals (def_gsi, occ, def, NULL, threshold);
  428. }
  429. FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, def)
  430. {
  431. if (is_division_by (use_stmt, def))
  432. {
  433. FOR_EACH_IMM_USE_ON_STMT (use_p, use_iter)
  434. replace_reciprocal (use_p);
  435. }
  436. }
  437. }
  438. for (occ = occ_head; occ; )
  439. occ = free_bb (occ);
  440. occ_head = NULL;
  441. }
  442. /* Go through all the floating-point SSA_NAMEs, and call
  443. execute_cse_reciprocals_1 on each of them. */
  444. namespace {
  445. const pass_data pass_data_cse_reciprocals =
  446. {
  447. GIMPLE_PASS, /* type */
  448. "recip", /* name */
  449. OPTGROUP_NONE, /* optinfo_flags */
  450. TV_NONE, /* tv_id */
  451. PROP_ssa, /* properties_required */
  452. 0, /* properties_provided */
  453. 0, /* properties_destroyed */
  454. 0, /* todo_flags_start */
  455. TODO_update_ssa, /* todo_flags_finish */
  456. };
  457. class pass_cse_reciprocals : public gimple_opt_pass
  458. {
  459. public:
  460. pass_cse_reciprocals (gcc::context *ctxt)
  461. : gimple_opt_pass (pass_data_cse_reciprocals, ctxt)
  462. {}
  463. /* opt_pass methods: */
  464. virtual bool gate (function *) { return optimize && flag_reciprocal_math; }
  465. virtual unsigned int execute (function *);
  466. }; // class pass_cse_reciprocals
  467. unsigned int
  468. pass_cse_reciprocals::execute (function *fun)
  469. {
  470. basic_block bb;
  471. tree arg;
  472. occ_pool = create_alloc_pool ("dominators for recip",
  473. sizeof (struct occurrence),
  474. n_basic_blocks_for_fn (fun) / 3 + 1);
  475. memset (&reciprocal_stats, 0, sizeof (reciprocal_stats));
  476. calculate_dominance_info (CDI_DOMINATORS);
  477. calculate_dominance_info (CDI_POST_DOMINATORS);
  478. #ifdef ENABLE_CHECKING
  479. FOR_EACH_BB_FN (bb, fun)
  480. gcc_assert (!bb->aux);
  481. #endif
  482. for (arg = DECL_ARGUMENTS (fun->decl); arg; arg = DECL_CHAIN (arg))
  483. if (FLOAT_TYPE_P (TREE_TYPE (arg))
  484. && is_gimple_reg (arg))
  485. {
  486. tree name = ssa_default_def (fun, arg);
  487. if (name)
  488. execute_cse_reciprocals_1 (NULL, name);
  489. }
  490. FOR_EACH_BB_FN (bb, fun)
  491. {
  492. tree def;
  493. for (gphi_iterator gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
  494. gsi_next (&gsi))
  495. {
  496. gphi *phi = gsi.phi ();
  497. def = PHI_RESULT (phi);
  498. if (! virtual_operand_p (def)
  499. && FLOAT_TYPE_P (TREE_TYPE (def)))
  500. execute_cse_reciprocals_1 (NULL, def);
  501. }
  502. for (gimple_stmt_iterator gsi = gsi_after_labels (bb); !gsi_end_p (gsi);
  503. gsi_next (&gsi))
  504. {
  505. gimple stmt = gsi_stmt (gsi);
  506. if (gimple_has_lhs (stmt)
  507. && (def = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_DEF)) != NULL
  508. && FLOAT_TYPE_P (TREE_TYPE (def))
  509. && TREE_CODE (def) == SSA_NAME)
  510. execute_cse_reciprocals_1 (&gsi, def);
  511. }
  512. if (optimize_bb_for_size_p (bb))
  513. continue;
  514. /* Scan for a/func(b) and convert it to reciprocal a*rfunc(b). */
  515. for (gimple_stmt_iterator gsi = gsi_after_labels (bb); !gsi_end_p (gsi);
  516. gsi_next (&gsi))
  517. {
  518. gimple stmt = gsi_stmt (gsi);
  519. tree fndecl;
  520. if (is_gimple_assign (stmt)
  521. && gimple_assign_rhs_code (stmt) == RDIV_EXPR)
  522. {
  523. tree arg1 = gimple_assign_rhs2 (stmt);
  524. gimple stmt1;
  525. if (TREE_CODE (arg1) != SSA_NAME)
  526. continue;
  527. stmt1 = SSA_NAME_DEF_STMT (arg1);
  528. if (is_gimple_call (stmt1)
  529. && gimple_call_lhs (stmt1)
  530. && (fndecl = gimple_call_fndecl (stmt1))
  531. && (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
  532. || DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD))
  533. {
  534. enum built_in_function code;
  535. bool md_code, fail;
  536. imm_use_iterator ui;
  537. use_operand_p use_p;
  538. code = DECL_FUNCTION_CODE (fndecl);
  539. md_code = DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD;
  540. fndecl = targetm.builtin_reciprocal (code, md_code, false);
  541. if (!fndecl)
  542. continue;
  543. /* Check that all uses of the SSA name are divisions,
  544. otherwise replacing the defining statement will do
  545. the wrong thing. */
  546. fail = false;
  547. FOR_EACH_IMM_USE_FAST (use_p, ui, arg1)
  548. {
  549. gimple stmt2 = USE_STMT (use_p);
  550. if (is_gimple_debug (stmt2))
  551. continue;
  552. if (!is_gimple_assign (stmt2)
  553. || gimple_assign_rhs_code (stmt2) != RDIV_EXPR
  554. || gimple_assign_rhs1 (stmt2) == arg1
  555. || gimple_assign_rhs2 (stmt2) != arg1)
  556. {
  557. fail = true;
  558. break;
  559. }
  560. }
  561. if (fail)
  562. continue;
  563. gimple_replace_ssa_lhs (stmt1, arg1);
  564. gimple_call_set_fndecl (stmt1, fndecl);
  565. update_stmt (stmt1);
  566. reciprocal_stats.rfuncs_inserted++;
  567. FOR_EACH_IMM_USE_STMT (stmt, ui, arg1)
  568. {
  569. gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
  570. gimple_assign_set_rhs_code (stmt, MULT_EXPR);
  571. fold_stmt_inplace (&gsi);
  572. update_stmt (stmt);
  573. }
  574. }
  575. }
  576. }
  577. }
  578. statistics_counter_event (fun, "reciprocal divs inserted",
  579. reciprocal_stats.rdivs_inserted);
  580. statistics_counter_event (fun, "reciprocal functions inserted",
  581. reciprocal_stats.rfuncs_inserted);
  582. free_dominance_info (CDI_DOMINATORS);
  583. free_dominance_info (CDI_POST_DOMINATORS);
  584. free_alloc_pool (occ_pool);
  585. return 0;
  586. }
  587. } // anon namespace
  588. gimple_opt_pass *
  589. make_pass_cse_reciprocals (gcc::context *ctxt)
  590. {
  591. return new pass_cse_reciprocals (ctxt);
  592. }
  593. /* Records an occurrence at statement USE_STMT in the vector of statements
  594. STMTS if it is dominated by *TOP_BB, dominates it, or if *TOP_BB
  595. is not yet initialized. Returns true if the occurrence was pushed on
  596. the vector. Adjusts *TOP_BB to be the basic block dominating all
  597. statements in the vector. */
  598. static bool
  599. maybe_record_sincos (vec<gimple> *stmts,
  600. basic_block *top_bb, gimple use_stmt)
  601. {
  602. basic_block use_bb = gimple_bb (use_stmt);
  603. if (*top_bb
  604. && (*top_bb == use_bb
  605. || dominated_by_p (CDI_DOMINATORS, use_bb, *top_bb)))
  606. stmts->safe_push (use_stmt);
  607. else if (!*top_bb
  608. || dominated_by_p (CDI_DOMINATORS, *top_bb, use_bb))
  609. {
  610. stmts->safe_push (use_stmt);
  611. *top_bb = use_bb;
  612. }
  613. else
  614. return false;
  615. return true;
  616. }
  617. /* Look for sin, cos and cexpi calls with the same argument NAME and
  618. create a single call to cexpi, CSEing the result in this case.
  619. We first walk over all immediate uses of the argument, collecting
  620. statements that we can CSE in a vector; in a second pass we replace
  621. the statement rhs with a REALPART or IMAGPART expression on the
  622. result of the cexpi call we insert before the use statement that
  623. dominates all other candidates. */
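  /* As a sketch of the rewrite this performs (names are illustrative):

         c = cos (a);                    t = cexpi (a);
         s = sin (a);         ==>        c = REALPART_EXPR <t>;
                                         s = IMAGPART_EXPR <t>;

     where the single cexpi call is inserted at the dominating use.  */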
  624. static bool
  625. execute_cse_sincos_1 (tree name)
  626. {
  627. gimple_stmt_iterator gsi;
  628. imm_use_iterator use_iter;
  629. tree fndecl, res, type;
  630. gimple def_stmt, use_stmt, stmt;
  631. int seen_cos = 0, seen_sin = 0, seen_cexpi = 0;
  632. auto_vec<gimple> stmts;
  633. basic_block top_bb = NULL;
  634. int i;
  635. bool cfg_changed = false;
  636. type = TREE_TYPE (name);
  637. FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, name)
  638. {
  639. if (gimple_code (use_stmt) != GIMPLE_CALL
  640. || !gimple_call_lhs (use_stmt)
  641. || !(fndecl = gimple_call_fndecl (use_stmt))
  642. || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_NORMAL)
  643. continue;
  644. switch (DECL_FUNCTION_CODE (fndecl))
  645. {
  646. CASE_FLT_FN (BUILT_IN_COS):
  647. seen_cos |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
  648. break;
  649. CASE_FLT_FN (BUILT_IN_SIN):
  650. seen_sin |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
  651. break;
  652. CASE_FLT_FN (BUILT_IN_CEXPI):
  653. seen_cexpi |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
  654. break;
  655. default:;
  656. }
  657. }
  658. if (seen_cos + seen_sin + seen_cexpi <= 1)
  659. return false;
  660. /* Simply insert cexpi at the beginning of top_bb but not earlier than
  661. the name def statement. */
  662. fndecl = mathfn_built_in (type, BUILT_IN_CEXPI);
  663. if (!fndecl)
  664. return false;
  665. stmt = gimple_build_call (fndecl, 1, name);
  666. res = make_temp_ssa_name (TREE_TYPE (TREE_TYPE (fndecl)), stmt, "sincostmp");
  667. gimple_call_set_lhs (stmt, res);
  668. def_stmt = SSA_NAME_DEF_STMT (name);
  669. if (!SSA_NAME_IS_DEFAULT_DEF (name)
  670. && gimple_code (def_stmt) != GIMPLE_PHI
  671. && gimple_bb (def_stmt) == top_bb)
  672. {
  673. gsi = gsi_for_stmt (def_stmt);
  674. gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
  675. }
  676. else
  677. {
  678. gsi = gsi_after_labels (top_bb);
  679. gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
  680. }
  681. sincos_stats.inserted++;
  682. /* And adjust the recorded old call sites. */
  683. for (i = 0; stmts.iterate (i, &use_stmt); ++i)
  684. {
  685. tree rhs = NULL;
  686. fndecl = gimple_call_fndecl (use_stmt);
  687. switch (DECL_FUNCTION_CODE (fndecl))
  688. {
  689. CASE_FLT_FN (BUILT_IN_COS):
  690. rhs = fold_build1 (REALPART_EXPR, type, res);
  691. break;
  692. CASE_FLT_FN (BUILT_IN_SIN):
  693. rhs = fold_build1 (IMAGPART_EXPR, type, res);
  694. break;
  695. CASE_FLT_FN (BUILT_IN_CEXPI):
  696. rhs = res;
  697. break;
  698. default:;
  699. gcc_unreachable ();
  700. }
  701. /* Replace call with a copy. */
  702. stmt = gimple_build_assign (gimple_call_lhs (use_stmt), rhs);
  703. gsi = gsi_for_stmt (use_stmt);
  704. gsi_replace (&gsi, stmt, true);
  705. if (gimple_purge_dead_eh_edges (gimple_bb (stmt)))
  706. cfg_changed = true;
  707. }
  708. return cfg_changed;
  709. }
  710. /* To evaluate powi(x,n), the floating point value x raised to the
  711. constant integer exponent n, we use a hybrid algorithm that
  712. combines the "window method" with look-up tables. For an
  713. introduction to exponentiation algorithms and "addition chains",
  714. see section 4.6.3, "Evaluation of Powers" of Donald E. Knuth,
  715. "Seminumerical Algorithms", Vol. 2, "The Art of Computer Programming",
  716. 3rd Edition, 1998, and Daniel M. Gordon, "A Survey of Fast Exponentiation
  717. Methods", Journal of Algorithms, Vol. 27, pp. 129-146, 1998. */
  718. /* Provide a default value for POWI_MAX_MULTS, the maximum number of
  719. multiplications to inline before calling the system library's pow
  720. function. powi(x,n) requires at worst 2*bits(n)-2 multiplications,
  721. so this default never requires calling pow, powf or powl. */
  722. #ifndef POWI_MAX_MULTS
  723. #define POWI_MAX_MULTS (2*HOST_BITS_PER_WIDE_INT-2)
  724. #endif
  725. /* The size of the "optimal power tree" lookup table. All
  726. exponents less than this value are simply looked up in the
  727. powi_table below. This threshold is also used to size the
  728. cache of pseudo registers that hold intermediate results. */
  729. #define POWI_TABLE_SIZE 256
  730. /* The size, in bits of the window, used in the "window method"
  731. exponentiation algorithm. This is equivalent to a radix of
  732. (1<<POWI_WINDOW_SIZE) in the corresponding "m-ary method". */
  733. #define POWI_WINDOW_SIZE 3
  734. /* The following table is an efficient representation of an
  735. "optimal power tree". For each value, i, the corresponding
  736. value, j, in the table states that an optimal evaluation
  737. sequence for calculating pow(x,i) can be found by evaluating
  738. pow(x,j)*pow(x,i-j). An optimal power tree for the first
  739. 100 integers is given in Knuth's "Seminumerical algorithms". */
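  /* For example, powi_table[7] below is 4, so x**7 is evaluated as
     x**4 * x**3; recursing, x**4 = x**2 * x**2 and x**3 = x**2 * x,
     giving a four-multiplication sequence (an illustrative sketch):

         t2 = x * x;  t3 = t2 * x;  t4 = t2 * t2;  t7 = t4 * t3;  */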
  740. static const unsigned char powi_table[POWI_TABLE_SIZE] =
  741. {
  742. 0, 1, 1, 2, 2, 3, 3, 4, /* 0 - 7 */
  743. 4, 6, 5, 6, 6, 10, 7, 9, /* 8 - 15 */
  744. 8, 16, 9, 16, 10, 12, 11, 13, /* 16 - 23 */
  745. 12, 17, 13, 18, 14, 24, 15, 26, /* 24 - 31 */
  746. 16, 17, 17, 19, 18, 33, 19, 26, /* 32 - 39 */
  747. 20, 25, 21, 40, 22, 27, 23, 44, /* 40 - 47 */
  748. 24, 32, 25, 34, 26, 29, 27, 44, /* 48 - 55 */
  749. 28, 31, 29, 34, 30, 60, 31, 36, /* 56 - 63 */
  750. 32, 64, 33, 34, 34, 46, 35, 37, /* 64 - 71 */
  751. 36, 65, 37, 50, 38, 48, 39, 69, /* 72 - 79 */
  752. 40, 49, 41, 43, 42, 51, 43, 58, /* 80 - 87 */
  753. 44, 64, 45, 47, 46, 59, 47, 76, /* 88 - 95 */
  754. 48, 65, 49, 66, 50, 67, 51, 66, /* 96 - 103 */
  755. 52, 70, 53, 74, 54, 104, 55, 74, /* 104 - 111 */
  756. 56, 64, 57, 69, 58, 78, 59, 68, /* 112 - 119 */
  757. 60, 61, 61, 80, 62, 75, 63, 68, /* 120 - 127 */
  758. 64, 65, 65, 128, 66, 129, 67, 90, /* 128 - 135 */
  759. 68, 73, 69, 131, 70, 94, 71, 88, /* 136 - 143 */
  760. 72, 128, 73, 98, 74, 132, 75, 121, /* 144 - 151 */
  761. 76, 102, 77, 124, 78, 132, 79, 106, /* 152 - 159 */
  762. 80, 97, 81, 160, 82, 99, 83, 134, /* 160 - 167 */
  763. 84, 86, 85, 95, 86, 160, 87, 100, /* 168 - 175 */
  764. 88, 113, 89, 98, 90, 107, 91, 122, /* 176 - 183 */
  765. 92, 111, 93, 102, 94, 126, 95, 150, /* 184 - 191 */
  766. 96, 128, 97, 130, 98, 133, 99, 195, /* 192 - 199 */
  767. 100, 128, 101, 123, 102, 164, 103, 138, /* 200 - 207 */
  768. 104, 145, 105, 146, 106, 109, 107, 149, /* 208 - 215 */
  769. 108, 200, 109, 146, 110, 170, 111, 157, /* 216 - 223 */
  770. 112, 128, 113, 130, 114, 182, 115, 132, /* 224 - 231 */
  771. 116, 200, 117, 132, 118, 158, 119, 206, /* 232 - 239 */
  772. 120, 240, 121, 162, 122, 147, 123, 152, /* 240 - 247 */
  773. 124, 166, 125, 214, 126, 138, 127, 153, /* 248 - 255 */
  774. };
  775. /* Return the number of multiplications required to calculate
  776. powi(x,n) where n is less than POWI_TABLE_SIZE. This is a
  777. subroutine of powi_cost. CACHE is an array indicating
  778. which exponents have already been calculated. */
  779. static int
  780. powi_lookup_cost (unsigned HOST_WIDE_INT n, bool *cache)
  781. {
  782. /* If we've already calculated this exponent, then this evaluation
  783. doesn't require any additional multiplications. */
  784. if (cache[n])
  785. return 0;
  786. cache[n] = true;
  787. return powi_lookup_cost (n - powi_table[n], cache)
  788. + powi_lookup_cost (powi_table[n], cache) + 1;
  789. }
  790. /* Return the number of multiplications required to calculate
  791. powi(x,n) for an arbitrary x, given the exponent N. This
  792. function needs to be kept in sync with powi_as_mults below. */
  793. static int
  794. powi_cost (HOST_WIDE_INT n)
  795. {
  796. bool cache[POWI_TABLE_SIZE];
  797. unsigned HOST_WIDE_INT digit;
  798. unsigned HOST_WIDE_INT val;
  799. int result;
  800. if (n == 0)
  801. return 0;
  802. /* Ignore the reciprocal when calculating the cost. */
  803. val = (n < 0) ? -n : n;
  804. /* Initialize the exponent cache. */
  805. memset (cache, 0, POWI_TABLE_SIZE * sizeof (bool));
  806. cache[1] = true;
  807. result = 0;
  808. while (val >= POWI_TABLE_SIZE)
  809. {
  810. if (val & 1)
  811. {
  812. digit = val & ((1 << POWI_WINDOW_SIZE) - 1);
  813. result += powi_lookup_cost (digit, cache)
  814. + POWI_WINDOW_SIZE + 1;
  815. val >>= POWI_WINDOW_SIZE;
  816. }
  817. else
  818. {
  819. val >>= 1;
  820. result++;
  821. }
  822. }
  823. return result + powi_lookup_cost (val, cache);
  824. }
  825. /* Recursive subroutine of powi_as_mults. This function takes the
  826. array, CACHE, of already calculated exponents and an exponent N and
  827. returns a tree that corresponds to CACHE[1]**N, with type TYPE. */
  828. static tree
  829. powi_as_mults_1 (gimple_stmt_iterator *gsi, location_t loc, tree type,
  830. HOST_WIDE_INT n, tree *cache)
  831. {
  832. tree op0, op1, ssa_target;
  833. unsigned HOST_WIDE_INT digit;
  834. gassign *mult_stmt;
  835. if (n < POWI_TABLE_SIZE && cache[n])
  836. return cache[n];
  837. ssa_target = make_temp_ssa_name (type, NULL, "powmult");
  838. if (n < POWI_TABLE_SIZE)
  839. {
  840. cache[n] = ssa_target;
  841. op0 = powi_as_mults_1 (gsi, loc, type, n - powi_table[n], cache);
  842. op1 = powi_as_mults_1 (gsi, loc, type, powi_table[n], cache);
  843. }
  844. else if (n & 1)
  845. {
  846. digit = n & ((1 << POWI_WINDOW_SIZE) - 1);
  847. op0 = powi_as_mults_1 (gsi, loc, type, n - digit, cache);
  848. op1 = powi_as_mults_1 (gsi, loc, type, digit, cache);
  849. }
  850. else
  851. {
  852. op0 = powi_as_mults_1 (gsi, loc, type, n >> 1, cache);
  853. op1 = op0;
  854. }
  855. mult_stmt = gimple_build_assign (ssa_target, MULT_EXPR, op0, op1);
  856. gimple_set_location (mult_stmt, loc);
  857. gsi_insert_before (gsi, mult_stmt, GSI_SAME_STMT);
  858. return ssa_target;
  859. }
  860. /* Convert ARG0**N to a tree of multiplications of ARG0 with itself.
  861. This function needs to be kept in sync with powi_cost above. */
  862. static tree
  863. powi_as_mults (gimple_stmt_iterator *gsi, location_t loc,
  864. tree arg0, HOST_WIDE_INT n)
  865. {
  866. tree cache[POWI_TABLE_SIZE], result, type = TREE_TYPE (arg0);
  867. gassign *div_stmt;
  868. tree target;
  869. if (n == 0)
  870. return build_real (type, dconst1);
  871. memset (cache, 0, sizeof (cache));
  872. cache[1] = arg0;
  873. result = powi_as_mults_1 (gsi, loc, type, (n < 0) ? -n : n, cache);
  874. if (n >= 0)
  875. return result;
  876. /* If the original exponent was negative, reciprocate the result. */
  877. target = make_temp_ssa_name (type, NULL, "powmult");
  878. div_stmt = gimple_build_assign (target, RDIV_EXPR,
  879. build_real (type, dconst1), result);
  880. gimple_set_location (div_stmt, loc);
  881. gsi_insert_before (gsi, div_stmt, GSI_SAME_STMT);
  882. return target;
  883. }
  884. /* ARG0 and N are the two arguments to a powi builtin in GSI with
  885. location info LOC. If the arguments are appropriate, create an
  886. equivalent sequence of statements prior to GSI using an optimal
  887. number of multiplications, and return an expression holding the
  888. result. */
  889. static tree
  890. gimple_expand_builtin_powi (gimple_stmt_iterator *gsi, location_t loc,
  891. tree arg0, HOST_WIDE_INT n)
  892. {
  893. /* Avoid largest negative number. */
  894. if (n != -n
  895. && ((n >= -1 && n <= 2)
  896. || (optimize_function_for_speed_p (cfun)
  897. && powi_cost (n) <= POWI_MAX_MULTS)))
  898. return powi_as_mults (gsi, loc, arg0, n);
  899. return NULL_TREE;
  900. }
  901. /* Build a gimple call statement that calls FN with argument ARG.
  902. Set the lhs of the call statement to a fresh SSA name. Insert the
  903. statement prior to GSI's current position, and return the fresh
  904. SSA name. */
  905. static tree
  906. build_and_insert_call (gimple_stmt_iterator *gsi, location_t loc,
  907. tree fn, tree arg)
  908. {
  909. gcall *call_stmt;
  910. tree ssa_target;
  911. call_stmt = gimple_build_call (fn, 1, arg);
  912. ssa_target = make_temp_ssa_name (TREE_TYPE (arg), NULL, "powroot");
  913. gimple_set_lhs (call_stmt, ssa_target);
  914. gimple_set_location (call_stmt, loc);
  915. gsi_insert_before (gsi, call_stmt, GSI_SAME_STMT);
  916. return ssa_target;
  917. }
  918. /* Build a gimple binary operation with the given CODE and arguments
  919. ARG0, ARG1, assigning the result to a new SSA name for variable
  920. NAME. Insert the statement prior to GSI's current position, and
  921. return the fresh SSA name. */
  922. static tree
  923. build_and_insert_binop (gimple_stmt_iterator *gsi, location_t loc,
  924. const char *name, enum tree_code code,
  925. tree arg0, tree arg1)
  926. {
  927. tree result = make_temp_ssa_name (TREE_TYPE (arg0), NULL, name);
  928. gassign *stmt = gimple_build_assign (result, code, arg0, arg1);
  929. gimple_set_location (stmt, loc);
  930. gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
  931. return result;
  932. }
  933. /* Build a gimple reference operation with the given CODE and argument
  934. ARG, assigning the result to a new SSA name of TYPE with NAME.
  935. Insert the statement prior to GSI's current position, and return
  936. the fresh SSA name. */
  937. static inline tree
  938. build_and_insert_ref (gimple_stmt_iterator *gsi, location_t loc, tree type,
  939. const char *name, enum tree_code code, tree arg0)
  940. {
  941. tree result = make_temp_ssa_name (type, NULL, name);
  942. gimple stmt = gimple_build_assign (result, build1 (code, type, arg0));
  943. gimple_set_location (stmt, loc);
  944. gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
  945. return result;
  946. }
  947. /* Build a gimple assignment to cast VAL to TYPE. Insert the statement
  948. prior to GSI's current position, and return the fresh SSA name. */
  949. static tree
  950. build_and_insert_cast (gimple_stmt_iterator *gsi, location_t loc,
  951. tree type, tree val)
  952. {
  953. tree result = make_ssa_name (type);
  954. gassign *stmt = gimple_build_assign (result, NOP_EXPR, val);
  955. gimple_set_location (stmt, loc);
  956. gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
  957. return result;
  958. }
  959. /* ARG0 and ARG1 are the two arguments to a pow builtin call in GSI
  960. with location info LOC. If possible, create an equivalent and
  961. less expensive sequence of statements prior to GSI, and return an
  962. expression holding the result. */
  963. static tree
  964. gimple_expand_builtin_pow (gimple_stmt_iterator *gsi, location_t loc,
  965. tree arg0, tree arg1)
  966. {
  967. REAL_VALUE_TYPE c, cint, dconst1_4, dconst3_4, dconst1_3, dconst1_6;
  968. REAL_VALUE_TYPE c2, dconst3;
  969. HOST_WIDE_INT n;
  970. tree type, sqrtfn, cbrtfn, sqrt_arg0, sqrt_sqrt, result, cbrt_x, powi_cbrt_x;
  971. machine_mode mode;
  972. bool hw_sqrt_exists, c_is_int, c2_is_int;
  973. /* If the exponent isn't a constant, there's nothing of interest
  974. to be done. */
  975. if (TREE_CODE (arg1) != REAL_CST)
  976. return NULL_TREE;
  977. /* If the exponent is equivalent to an integer, expand to an optimal
  978. multiplication sequence when profitable. */
  979. c = TREE_REAL_CST (arg1);
  980. n = real_to_integer (&c);
  981. real_from_integer (&cint, VOIDmode, n, SIGNED);
  982. c_is_int = real_identical (&c, &cint);
  983. if (c_is_int
  984. && ((n >= -1 && n <= 2)
  985. || (flag_unsafe_math_optimizations
  986. && optimize_bb_for_speed_p (gsi_bb (*gsi))
  987. && powi_cost (n) <= POWI_MAX_MULTS)))
  988. return gimple_expand_builtin_powi (gsi, loc, arg0, n);
  989. /* Attempt various optimizations using sqrt and cbrt. */
  990. type = TREE_TYPE (arg0);
  991. mode = TYPE_MODE (type);
  992. sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
  993. /* Optimize pow(x,0.5) = sqrt(x). This replacement is always safe
  994. unless signed zeros must be maintained. pow(-0,0.5) = +0, while
  995. sqrt(-0) = -0. */
  996. if (sqrtfn
  997. && REAL_VALUES_EQUAL (c, dconsthalf)
  998. && !HONOR_SIGNED_ZEROS (mode))
  999. return build_and_insert_call (gsi, loc, sqrtfn, arg0);
  1000. /* Optimize pow(x,0.25) = sqrt(sqrt(x)). Assume on most machines that
  1001. a builtin sqrt instruction is smaller than a call to pow with 0.25,
  1002. so do this optimization even if -Os. Don't do this optimization
  1003. if we don't have a hardware sqrt insn. */
  1004. dconst1_4 = dconst1;
  1005. SET_REAL_EXP (&dconst1_4, REAL_EXP (&dconst1_4) - 2);
  1006. hw_sqrt_exists = optab_handler (sqrt_optab, mode) != CODE_FOR_nothing;
  1007. if (flag_unsafe_math_optimizations
  1008. && sqrtfn
  1009. && REAL_VALUES_EQUAL (c, dconst1_4)
  1010. && hw_sqrt_exists)
  1011. {
  1012. /* sqrt(x) */
  1013. sqrt_arg0 = build_and_insert_call (gsi, loc, sqrtfn, arg0);
  1014. /* sqrt(sqrt(x)) */
  1015. return build_and_insert_call (gsi, loc, sqrtfn, sqrt_arg0);
  1016. }
  1017. /* Optimize pow(x,0.75) = sqrt(x) * sqrt(sqrt(x)) unless we are
  1018. optimizing for space. Don't do this optimization if we don't have
  1019. a hardware sqrt insn. */
  1020. real_from_integer (&dconst3_4, VOIDmode, 3, SIGNED);
  1021. SET_REAL_EXP (&dconst3_4, REAL_EXP (&dconst3_4) - 2);
  1022. if (flag_unsafe_math_optimizations
  1023. && sqrtfn
  1024. && optimize_function_for_speed_p (cfun)
  1025. && REAL_VALUES_EQUAL (c, dconst3_4)
  1026. && hw_sqrt_exists)
  1027. {
  1028. /* sqrt(x) */
  1029. sqrt_arg0 = build_and_insert_call (gsi, loc, sqrtfn, arg0);
  1030. /* sqrt(sqrt(x)) */
  1031. sqrt_sqrt = build_and_insert_call (gsi, loc, sqrtfn, sqrt_arg0);
  1032. /* sqrt(x) * sqrt(sqrt(x)) */
  1033. return build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
  1034. sqrt_arg0, sqrt_sqrt);
  1035. }
  1036. /* Optimize pow(x,1./3.) = cbrt(x). This requires unsafe math
  1037. optimizations since 1./3. is not exactly representable. If x
  1038. is negative and finite, the correct value of pow(x,1./3.) is
  1039. a NaN with the "invalid" exception raised, because the value
  1040. of 1./3. actually has an even denominator. The correct value
  1041. of cbrt(x) is a negative real value. */
  1042. cbrtfn = mathfn_built_in (type, BUILT_IN_CBRT);
  1043. dconst1_3 = real_value_truncate (mode, dconst_third ());
  1044. if (flag_unsafe_math_optimizations
  1045. && cbrtfn
  1046. && (gimple_val_nonnegative_real_p (arg0) || !HONOR_NANS (mode))
  1047. && REAL_VALUES_EQUAL (c, dconst1_3))
  1048. return build_and_insert_call (gsi, loc, cbrtfn, arg0);
  1049. /* Optimize pow(x,1./6.) = cbrt(sqrt(x)). Don't do this optimization
  1050. if we don't have a hardware sqrt insn. */
  1051. dconst1_6 = dconst1_3;
  1052. SET_REAL_EXP (&dconst1_6, REAL_EXP (&dconst1_6) - 1);
  1053. if (flag_unsafe_math_optimizations
  1054. && sqrtfn
  1055. && cbrtfn
  1056. && (gimple_val_nonnegative_real_p (arg0) || !HONOR_NANS (mode))
  1057. && optimize_function_for_speed_p (cfun)
  1058. && hw_sqrt_exists
  1059. && REAL_VALUES_EQUAL (c, dconst1_6))
  1060. {
  1061. /* sqrt(x) */
  1062. sqrt_arg0 = build_and_insert_call (gsi, loc, sqrtfn, arg0);
  1063. /* cbrt(sqrt(x)) */
  1064. return build_and_insert_call (gsi, loc, cbrtfn, sqrt_arg0);
  1065. }
  1066. /* Optimize pow(x,c), where n = 2c for some nonzero integer n
  1067. and c not an integer, into
  1068. sqrt(x) * powi(x, n/2), n > 0;
  1069. 1.0 / (sqrt(x) * powi(x, abs(n/2))), n < 0.
  1070. Do not calculate the powi factor when n/2 = 0. */
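  /* As an illustrative example, c = 2.5 gives n = 5, so the code below
     emits sqrt(x) * powi(x, 2), i.e. sqrt(x) * x * x == x**2.5.  */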
  1071. real_arithmetic (&c2, MULT_EXPR, &c, &dconst2);
  1072. n = real_to_integer (&c2);
  1073. real_from_integer (&cint, VOIDmode, n, SIGNED);
  1074. c2_is_int = real_identical (&c2, &cint);
  1075. if (flag_unsafe_math_optimizations
  1076. && sqrtfn
  1077. && c2_is_int
  1078. && !c_is_int
  1079. && optimize_function_for_speed_p (cfun))
  1080. {
  1081. tree powi_x_ndiv2 = NULL_TREE;
  1082. /* Attempt to fold powi(arg0, abs(n/2)) into multiplies. If not
  1083. possible or profitable, give up. Skip the degenerate case when
  1084. n is 1 or -1, where the result is always 1. */
  1085. if (absu_hwi (n) != 1)
  1086. {
  1087. powi_x_ndiv2 = gimple_expand_builtin_powi (gsi, loc, arg0,
  1088. abs_hwi (n / 2));
  1089. if (!powi_x_ndiv2)
  1090. return NULL_TREE;
  1091. }
  1092. /* Calculate sqrt(x). When n is not 1 or -1, multiply it by the
  1093. result of the optimal multiply sequence just calculated. */
  1094. sqrt_arg0 = build_and_insert_call (gsi, loc, sqrtfn, arg0);
  1095. if (absu_hwi (n) == 1)
  1096. result = sqrt_arg0;
  1097. else
  1098. result = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
  1099. sqrt_arg0, powi_x_ndiv2);
  1100. /* If n is negative, reciprocate the result. */
  1101. if (n < 0)
  1102. result = build_and_insert_binop (gsi, loc, "powroot", RDIV_EXPR,
  1103. build_real (type, dconst1), result);
  1104. return result;
  1105. }
  1106. /* Optimize pow(x,c), where 3c = n for some nonzero integer n, into
  1107. powi(x, n/3) * powi(cbrt(x), n%3), n > 0;
  1108. 1.0 / (powi(x, abs(n)/3) * powi(cbrt(x), abs(n)%3)), n < 0.
  1109. Do not calculate the first factor when n/3 = 0. As cbrt(x) is
  1110. different from pow(x, 1./3.) due to rounding and behavior with
  1111. negative x, we need to constrain this transformation to unsafe
  1112. math and positive x or finite math. */
  1113. real_from_integer (&dconst3, VOIDmode, 3, SIGNED);
  1114. real_arithmetic (&c2, MULT_EXPR, &c, &dconst3);
  1115. real_round (&c2, mode, &c2);
  1116. n = real_to_integer (&c2);
  1117. real_from_integer (&cint, VOIDmode, n, SIGNED);
  1118. real_arithmetic (&c2, RDIV_EXPR, &cint, &dconst3);
  1119. real_convert (&c2, mode, &c2);
  1120. if (flag_unsafe_math_optimizations
  1121. && cbrtfn
  1122. && (gimple_val_nonnegative_real_p (arg0) || !HONOR_NANS (mode))
  1123. && real_identical (&c2, &c)
  1124. && !c2_is_int
  1125. && optimize_function_for_speed_p (cfun)
  1126. && powi_cost (n / 3) <= POWI_MAX_MULTS)
  1127. {
  1128. tree powi_x_ndiv3 = NULL_TREE;
  1129. /* Attempt to fold powi(arg0, abs(n/3)) into multiplies. If not
  1130. possible or profitable, give up. Skip the degenerate case when
  1131. abs(n) < 3, where the result is always 1. */
  1132. if (absu_hwi (n) >= 3)
  1133. {
  1134. powi_x_ndiv3 = gimple_expand_builtin_powi (gsi, loc, arg0,
  1135. abs_hwi (n / 3));
  1136. if (!powi_x_ndiv3)
  1137. return NULL_TREE;
  1138. }
  1139. /* Calculate powi(cbrt(x), n%3). Don't use gimple_expand_builtin_powi
  1140. as that creates an unnecessary variable. Instead, just produce
  1141. either cbrt(x) or cbrt(x) * cbrt(x). */
  1142. cbrt_x = build_and_insert_call (gsi, loc, cbrtfn, arg0);
  1143. if (absu_hwi (n) % 3 == 1)
  1144. powi_cbrt_x = cbrt_x;
  1145. else
  1146. powi_cbrt_x = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
  1147. cbrt_x, cbrt_x);
  1148. /* Multiply the two subexpressions, unless powi(x,abs(n)/3) = 1. */
  1149. if (absu_hwi (n) < 3)
  1150. result = powi_cbrt_x;
  1151. else
  1152. result = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
  1153. powi_x_ndiv3, powi_cbrt_x);
  1154. /* If n is negative, reciprocate the result. */
  1155. if (n < 0)
  1156. result = build_and_insert_binop (gsi, loc, "powroot", RDIV_EXPR,
  1157. build_real (type, dconst1), result);
  1158. return result;
  1159. }
  1160. /* No optimizations succeeded. */
  1161. return NULL_TREE;
  1162. }
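/* As a rough source-level sketch of the transformations above (assuming
   -funsafe-math-optimizations, optimizing for speed and a hardware sqrt
   instruction), a call such as

     double f (double x) { return pow (x, 0.75); }

   is rewritten along the lines of

     double f (double x)
     {
       double s = sqrt (x);
       return s * sqrt (s);
     }

   while pow (x, 1./3.) collapses to a single cbrt (x) call.  */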
  1163. /* ARG is the argument to a cabs builtin call in GSI with location info
  1164. LOC. Create a sequence of statements prior to GSI that calculates
  1165. sqrt(R*R + I*I), where R and I are the real and imaginary components
  1166. of ARG, respectively. Return an expression holding the result. */
  1167. static tree
  1168. gimple_expand_builtin_cabs (gimple_stmt_iterator *gsi, location_t loc, tree arg)
  1169. {
  1170. tree real_part, imag_part, addend1, addend2, sum, result;
  1171. tree type = TREE_TYPE (TREE_TYPE (arg));
  1172. tree sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
  1173. machine_mode mode = TYPE_MODE (type);
  1174. if (!flag_unsafe_math_optimizations
  1175. || !optimize_bb_for_speed_p (gimple_bb (gsi_stmt (*gsi)))
  1176. || !sqrtfn
  1177. || optab_handler (sqrt_optab, mode) == CODE_FOR_nothing)
  1178. return NULL_TREE;
  1179. real_part = build_and_insert_ref (gsi, loc, type, "cabs",
  1180. REALPART_EXPR, arg);
  1181. addend1 = build_and_insert_binop (gsi, loc, "cabs", MULT_EXPR,
  1182. real_part, real_part);
  1183. imag_part = build_and_insert_ref (gsi, loc, type, "cabs",
  1184. IMAGPART_EXPR, arg);
  1185. addend2 = build_and_insert_binop (gsi, loc, "cabs", MULT_EXPR,
  1186. imag_part, imag_part);
  1187. sum = build_and_insert_binop (gsi, loc, "cabs", PLUS_EXPR, addend1, addend2);
  1188. result = build_and_insert_call (gsi, loc, sqrtfn, sum);
  1189. return result;
  1190. }
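/* A rough source-level picture of the expansion above (again assuming
   -funsafe-math-optimizations, optimizing for speed and a hardware sqrt
   instruction): a call cabs (z) becomes, in effect,

     double r = __real__ z;
     double i = __imag__ z;
     double res = sqrt (r * r + i * i);

   instead of a libcall.  */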
  1191. /* Go through all calls to sin, cos and cexpi and call execute_cse_sincos_1
  1192. on the SSA_NAME argument of each of them. Also expand powi(x,n) into
  1193. an optimal number of multiplies, when n is a constant. */
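/* For instance, with a constant exponent,

     double f (double x) { return __builtin_powi (x, 5); }

   is expanded into a short multiply chain roughly equivalent to

     t1 = x * x;   t2 = t1 * t1;   result = t2 * x;

   (x^2, x^4, x^5) rather than a libcall, provided the chain stays within
   POWI_MAX_MULTS multiplies.  */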
  1194. namespace {
  1195. const pass_data pass_data_cse_sincos =
  1196. {
  1197. GIMPLE_PASS, /* type */
  1198. "sincos", /* name */
  1199. OPTGROUP_NONE, /* optinfo_flags */
  1200. TV_NONE, /* tv_id */
  1201. PROP_ssa, /* properties_required */
  1202. 0, /* properties_provided */
  1203. 0, /* properties_destroyed */
  1204. 0, /* todo_flags_start */
  1205. TODO_update_ssa, /* todo_flags_finish */
  1206. };
  1207. class pass_cse_sincos : public gimple_opt_pass
  1208. {
  1209. public:
  1210. pass_cse_sincos (gcc::context *ctxt)
  1211. : gimple_opt_pass (pass_data_cse_sincos, ctxt)
  1212. {}
  1213. /* opt_pass methods: */
  1214. virtual bool gate (function *)
  1215. {
  1216. /* We no longer require either sincos or cexp, since powi expansion
  1217. piggybacks on this pass. */
  1218. return optimize;
  1219. }
  1220. virtual unsigned int execute (function *);
  1221. }; // class pass_cse_sincos
  1222. unsigned int
  1223. pass_cse_sincos::execute (function *fun)
  1224. {
  1225. basic_block bb;
  1226. bool cfg_changed = false;
  1227. calculate_dominance_info (CDI_DOMINATORS);
  1228. memset (&sincos_stats, 0, sizeof (sincos_stats));
  1229. FOR_EACH_BB_FN (bb, fun)
  1230. {
  1231. gimple_stmt_iterator gsi;
  1232. bool cleanup_eh = false;
  1233. for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
  1234. {
  1235. gimple stmt = gsi_stmt (gsi);
  1236. tree fndecl;
  1237. /* Only the last stmt in a bb could throw, no need to call
  1238. gimple_purge_dead_eh_edges if we change something in the middle
  1239. of a basic block. */
  1240. cleanup_eh = false;
  1241. if (is_gimple_call (stmt)
  1242. && gimple_call_lhs (stmt)
  1243. && (fndecl = gimple_call_fndecl (stmt))
  1244. && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
  1245. {
  1246. tree arg, arg0, arg1, result;
  1247. HOST_WIDE_INT n;
  1248. location_t loc;
  1249. switch (DECL_FUNCTION_CODE (fndecl))
  1250. {
  1251. CASE_FLT_FN (BUILT_IN_COS):
  1252. CASE_FLT_FN (BUILT_IN_SIN):
  1253. CASE_FLT_FN (BUILT_IN_CEXPI):
  1254. /* Make sure we have either sincos or cexp. */
  1255. if (!targetm.libc_has_function (function_c99_math_complex)
  1256. && !targetm.libc_has_function (function_sincos))
  1257. break;
  1258. arg = gimple_call_arg (stmt, 0);
  1259. if (TREE_CODE (arg) == SSA_NAME)
  1260. cfg_changed |= execute_cse_sincos_1 (arg);
  1261. break;
  1262. CASE_FLT_FN (BUILT_IN_POW):
  1263. arg0 = gimple_call_arg (stmt, 0);
  1264. arg1 = gimple_call_arg (stmt, 1);
  1265. loc = gimple_location (stmt);
  1266. result = gimple_expand_builtin_pow (&gsi, loc, arg0, arg1);
  1267. if (result)
  1268. {
  1269. tree lhs = gimple_get_lhs (stmt);
  1270. gassign *new_stmt = gimple_build_assign (lhs, result);
  1271. gimple_set_location (new_stmt, loc);
  1272. unlink_stmt_vdef (stmt);
  1273. gsi_replace (&gsi, new_stmt, true);
  1274. cleanup_eh = true;
  1275. if (gimple_vdef (stmt))
  1276. release_ssa_name (gimple_vdef (stmt));
  1277. }
  1278. break;
  1279. CASE_FLT_FN (BUILT_IN_POWI):
  1280. arg0 = gimple_call_arg (stmt, 0);
  1281. arg1 = gimple_call_arg (stmt, 1);
  1282. loc = gimple_location (stmt);
  1283. if (real_minus_onep (arg0))
  1284. {
  1285. tree t0, t1, cond, one, minus_one;
  1286. gassign *stmt;
  1287. t0 = TREE_TYPE (arg0);
  1288. t1 = TREE_TYPE (arg1);
  1289. one = build_real (t0, dconst1);
  1290. minus_one = build_real (t0, dconstm1);
  1291. cond = make_temp_ssa_name (t1, NULL, "powi_cond");
  1292. stmt = gimple_build_assign (cond, BIT_AND_EXPR,
  1293. arg1, build_int_cst (t1, 1));
  1294. gimple_set_location (stmt, loc);
  1295. gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
  1296. result = make_temp_ssa_name (t0, NULL, "powi");
  1297. stmt = gimple_build_assign (result, COND_EXPR, cond,
  1298. minus_one, one);
  1299. gimple_set_location (stmt, loc);
  1300. gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
  1301. }
  1302. else
  1303. {
  1304. if (!tree_fits_shwi_p (arg1))
  1305. break;
  1306. n = tree_to_shwi (arg1);
  1307. result = gimple_expand_builtin_powi (&gsi, loc, arg0, n);
  1308. }
  1309. if (result)
  1310. {
  1311. tree lhs = gimple_get_lhs (stmt);
  1312. gassign *new_stmt = gimple_build_assign (lhs, result);
  1313. gimple_set_location (new_stmt, loc);
  1314. unlink_stmt_vdef (stmt);
  1315. gsi_replace (&gsi, new_stmt, true);
  1316. cleanup_eh = true;
  1317. if (gimple_vdef (stmt))
  1318. release_ssa_name (gimple_vdef (stmt));
  1319. }
  1320. break;
  1321. CASE_FLT_FN (BUILT_IN_CABS):
  1322. arg0 = gimple_call_arg (stmt, 0);
  1323. loc = gimple_location (stmt);
  1324. result = gimple_expand_builtin_cabs (&gsi, loc, arg0);
  1325. if (result)
  1326. {
  1327. tree lhs = gimple_get_lhs (stmt);
  1328. gassign *new_stmt = gimple_build_assign (lhs, result);
  1329. gimple_set_location (new_stmt, loc);
  1330. unlink_stmt_vdef (stmt);
  1331. gsi_replace (&gsi, new_stmt, true);
  1332. cleanup_eh = true;
  1333. if (gimple_vdef (stmt))
  1334. release_ssa_name (gimple_vdef (stmt));
  1335. }
  1336. break;
  1337. default:;
  1338. }
  1339. }
  1340. }
  1341. if (cleanup_eh)
  1342. cfg_changed |= gimple_purge_dead_eh_edges (bb);
  1343. }
  1344. statistics_counter_event (fun, "sincos statements inserted",
  1345. sincos_stats.inserted);
  1346. free_dominance_info (CDI_DOMINATORS);
  1347. return cfg_changed ? TODO_cleanup_cfg : 0;
  1348. }
  1349. } // anon namespace
  1350. gimple_opt_pass *
  1351. make_pass_cse_sincos (gcc::context *ctxt)
  1352. {
  1353. return new pass_cse_sincos (ctxt);
  1354. }
  1355. /* A symbolic number is used to detect byte permutation and selection
  1356. patterns. Therefore the field N contains an artificial number
  1357. consisting of octet sized markers:
  1358. 0 - target byte has the value 0
  1359. FF - target byte has an unknown value (eg. due to sign extension)
  1360. 1..size - marker value is the target byte index minus one.
  1361. To detect permutations on memory sources (arrays and structures), a symbolic
1362. number is also associated with a base address (the array or structure the load is
  1363. made from), an offset from the base address and a range which gives the
  1364. difference between the highest and lowest accessed memory location to make
1365. such a symbolic number. The range is thus different from size, which reflects
1366. the size of the type of the current expression. Note that for a non-memory source,
  1367. range holds the same value as size.
  1368. For instance, for an array char a[], (short) a[0] | (short) a[3] would have
  1369. a size of 2 but a range of 4 while (short) a[0] | ((short) a[0] << 1) would
  1370. still have a size of 2 but this time a range of 1. */
  1371. struct symbolic_number {
  1372. uint64_t n;
  1373. tree type;
  1374. tree base_addr;
  1375. tree offset;
  1376. HOST_WIDE_INT bytepos;
  1377. tree alias_set;
  1378. tree vuse;
  1379. unsigned HOST_WIDE_INT range;
  1380. };
  1381. #define BITS_PER_MARKER 8
  1382. #define MARKER_MASK ((1 << BITS_PER_MARKER) - 1)
  1383. #define MARKER_BYTE_UNKNOWN MARKER_MASK
  1384. #define HEAD_MARKER(n, size) \
  1385. ((n) & ((uint64_t) MARKER_MASK << (((size) - 1) * BITS_PER_MARKER)))
  1386. /* The number which the find_bswap_or_nop_1 result should match in
  1387. order to have a nop. The number is masked according to the size of
  1388. the symbolic number before using it. */
  1389. #define CMPNOP (sizeof (int64_t) < 8 ? 0 : \
  1390. (uint64_t)0x08070605 << 32 | 0x04030201)
  1391. /* The number which the find_bswap_or_nop_1 result should match in
  1392. order to have a byte swap. The number is masked according to the
  1393. size of the symbolic number before using it. */
  1394. #define CMPXCHG (sizeof (int64_t) < 8 ? 0 : \
  1395. (uint64_t)0x01020304 << 32 | 0x05060708)
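/* For example, when the symbolic number covers 4 bytes, the identity (nop)
   pattern is CMPNOP masked down to 4 markers, i.e. 0x04030201, while a full
   byte swap corresponds to CMPXCHG shifted down to 4 markers, i.e.
   0x01020304.  */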
  1396. /* Perform a SHIFT or ROTATE operation by COUNT bits on symbolic
  1397. number N. Return false if the requested operation is not permitted
  1398. on a symbolic number. */
  1399. static inline bool
  1400. do_shift_rotate (enum tree_code code,
  1401. struct symbolic_number *n,
  1402. int count)
  1403. {
  1404. int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
  1405. unsigned head_marker;
  1406. if (count % BITS_PER_UNIT != 0)
  1407. return false;
  1408. count = (count / BITS_PER_UNIT) * BITS_PER_MARKER;
  1409. /* Zero out the extra bits of N in order to avoid them being shifted
  1410. into the significant bits. */
  1411. if (size < 64 / BITS_PER_MARKER)
  1412. n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
  1413. switch (code)
  1414. {
  1415. case LSHIFT_EXPR:
  1416. n->n <<= count;
  1417. break;
  1418. case RSHIFT_EXPR:
  1419. head_marker = HEAD_MARKER (n->n, size);
  1420. n->n >>= count;
  1421. /* Arithmetic shift of signed type: result is dependent on the value. */
  1422. if (!TYPE_UNSIGNED (n->type) && head_marker)
  1423. for (i = 0; i < count / BITS_PER_MARKER; i++)
  1424. n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
  1425. << ((size - 1 - i) * BITS_PER_MARKER);
  1426. break;
  1427. case LROTATE_EXPR:
  1428. n->n = (n->n << count) | (n->n >> ((size * BITS_PER_MARKER) - count));
  1429. break;
  1430. case RROTATE_EXPR:
  1431. n->n = (n->n >> count) | (n->n << ((size * BITS_PER_MARKER) - count));
  1432. break;
  1433. default:
  1434. return false;
  1435. }
  1436. /* Zero unused bits for size. */
  1437. if (size < 64 / BITS_PER_MARKER)
  1438. n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
  1439. return true;
  1440. }
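/* As a small worked example: for a 4-byte symbolic number holding the
   identity pattern 0x04030201, an LSHIFT_EXPR by 8 bits shifts every marker
   up by one position and yields 0x03020100, i.e. the lowest target byte is
   now known to be zero.  */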
  1441. /* Perform sanity checking for the symbolic number N and the gimple
  1442. statement STMT. */
  1443. static inline bool
  1444. verify_symbolic_number_p (struct symbolic_number *n, gimple stmt)
  1445. {
  1446. tree lhs_type;
  1447. lhs_type = gimple_expr_type (stmt);
  1448. if (TREE_CODE (lhs_type) != INTEGER_TYPE)
  1449. return false;
  1450. if (TYPE_PRECISION (lhs_type) != TYPE_PRECISION (n->type))
  1451. return false;
  1452. return true;
  1453. }
  1454. /* Initialize the symbolic number N for the bswap pass from the base element
  1455. SRC manipulated by the bitwise OR expression. */
  1456. static bool
  1457. init_symbolic_number (struct symbolic_number *n, tree src)
  1458. {
  1459. int size;
  1460. n->base_addr = n->offset = n->alias_set = n->vuse = NULL_TREE;
  1461. /* Set up the symbolic number N by setting each byte to a value between 1 and
1462. the byte size of rhs1. The highest-order byte is set to that byte size and
1463. the lowest-order byte to 1. */
  1464. n->type = TREE_TYPE (src);
  1465. size = TYPE_PRECISION (n->type);
  1466. if (size % BITS_PER_UNIT != 0)
  1467. return false;
  1468. size /= BITS_PER_UNIT;
  1469. if (size > 64 / BITS_PER_MARKER)
  1470. return false;
  1471. n->range = size;
  1472. n->n = CMPNOP;
  1473. if (size < 64 / BITS_PER_MARKER)
  1474. n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
  1475. return true;
  1476. }
1477. /* Check if STMT might be a byte swap or a nop from a memory source, and return
1478. the answer. If so, REF is that memory source, and the base of the memory area
  1479. accessed and the offset of the access from that base are recorded in N. */
  1480. bool
  1481. find_bswap_or_nop_load (gimple stmt, tree ref, struct symbolic_number *n)
  1482. {
  1483. /* Leaf node is an array or component ref. Memorize its base and
1484. offset from base to compare to other such leaf nodes. */
  1485. HOST_WIDE_INT bitsize, bitpos;
  1486. machine_mode mode;
  1487. int unsignedp, volatilep;
  1488. tree offset, base_addr;
  1489. /* Not prepared to handle PDP endian. */
  1490. if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
  1491. return false;
  1492. if (!gimple_assign_load_p (stmt) || gimple_has_volatile_ops (stmt))
  1493. return false;
  1494. base_addr = get_inner_reference (ref, &bitsize, &bitpos, &offset, &mode,
  1495. &unsignedp, &volatilep, false);
  1496. if (TREE_CODE (base_addr) == MEM_REF)
  1497. {
  1498. offset_int bit_offset = 0;
  1499. tree off = TREE_OPERAND (base_addr, 1);
  1500. if (!integer_zerop (off))
  1501. {
  1502. offset_int boff, coff = mem_ref_offset (base_addr);
  1503. boff = wi::lshift (coff, LOG2_BITS_PER_UNIT);
  1504. bit_offset += boff;
  1505. }
  1506. base_addr = TREE_OPERAND (base_addr, 0);
  1507. /* Avoid returning a negative bitpos as this may wreak havoc later. */
  1508. if (wi::neg_p (bit_offset))
  1509. {
  1510. offset_int mask = wi::mask <offset_int> (LOG2_BITS_PER_UNIT, false);
  1511. offset_int tem = bit_offset.and_not (mask);
  1512. /* TEM is the bitpos rounded to BITS_PER_UNIT towards -Inf.
1513. Subtract it from BIT_OFFSET and add it (scaled) to OFFSET. */
  1514. bit_offset -= tem;
  1515. tem = wi::arshift (tem, LOG2_BITS_PER_UNIT);
  1516. if (offset)
  1517. offset = size_binop (PLUS_EXPR, offset,
  1518. wide_int_to_tree (sizetype, tem));
  1519. else
  1520. offset = wide_int_to_tree (sizetype, tem);
  1521. }
  1522. bitpos += bit_offset.to_shwi ();
  1523. }
  1524. if (bitpos % BITS_PER_UNIT)
  1525. return false;
  1526. if (bitsize % BITS_PER_UNIT)
  1527. return false;
  1528. if (!init_symbolic_number (n, ref))
  1529. return false;
  1530. n->base_addr = base_addr;
  1531. n->offset = offset;
  1532. n->bytepos = bitpos / BITS_PER_UNIT;
  1533. n->alias_set = reference_alias_ptr_type (ref);
  1534. n->vuse = gimple_vuse (stmt);
  1535. return true;
  1536. }
1537. /* Compute the symbolic number N representing the result of a bitwise OR on the
1538. two symbolic numbers N1 and N2, whose source statements are respectively
  1539. SOURCE_STMT1 and SOURCE_STMT2. */
  1540. static gimple
  1541. perform_symbolic_merge (gimple source_stmt1, struct symbolic_number *n1,
  1542. gimple source_stmt2, struct symbolic_number *n2,
  1543. struct symbolic_number *n)
  1544. {
  1545. int i, size;
  1546. uint64_t mask;
  1547. gimple source_stmt;
  1548. struct symbolic_number *n_start;
1549. /* Sources are different; cancel bswap if they are not memory locations with
  1550. the same base (array, structure, ...). */
  1551. if (gimple_assign_rhs1 (source_stmt1) != gimple_assign_rhs1 (source_stmt2))
  1552. {
  1553. int64_t inc;
  1554. HOST_WIDE_INT start_sub, end_sub, end1, end2, end;
  1555. struct symbolic_number *toinc_n_ptr, *n_end;
  1556. if (!n1->base_addr || !n2->base_addr
  1557. || !operand_equal_p (n1->base_addr, n2->base_addr, 0))
  1558. return NULL;
  1559. if (!n1->offset != !n2->offset
  1560. || (n1->offset && !operand_equal_p (n1->offset, n2->offset, 0)))
  1561. return NULL;
  1562. if (n1->bytepos < n2->bytepos)
  1563. {
  1564. n_start = n1;
  1565. start_sub = n2->bytepos - n1->bytepos;
  1566. source_stmt = source_stmt1;
  1567. }
  1568. else
  1569. {
  1570. n_start = n2;
  1571. start_sub = n1->bytepos - n2->bytepos;
  1572. source_stmt = source_stmt2;
  1573. }
  1574. /* Find the highest address at which a load is performed and
  1575. compute related info. */
  1576. end1 = n1->bytepos + (n1->range - 1);
  1577. end2 = n2->bytepos + (n2->range - 1);
  1578. if (end1 < end2)
  1579. {
  1580. end = end2;
  1581. end_sub = end2 - end1;
  1582. }
  1583. else
  1584. {
  1585. end = end1;
  1586. end_sub = end1 - end2;
  1587. }
  1588. n_end = (end2 > end1) ? n2 : n1;
  1589. /* Find symbolic number whose lsb is the most significant. */
  1590. if (BYTES_BIG_ENDIAN)
  1591. toinc_n_ptr = (n_end == n1) ? n2 : n1;
  1592. else
  1593. toinc_n_ptr = (n_start == n1) ? n2 : n1;
  1594. n->range = end - n_start->bytepos + 1;
  1595. /* Check that the range of memory covered can be represented by
  1596. a symbolic number. */
  1597. if (n->range > 64 / BITS_PER_MARKER)
  1598. return NULL;
1599. /* Reinterpret the byte marks in the symbolic number holding the value of
1600. bigger weight, according to target endianness. */
  1601. inc = BYTES_BIG_ENDIAN ? end_sub : start_sub;
  1602. size = TYPE_PRECISION (n1->type) / BITS_PER_UNIT;
  1603. for (i = 0; i < size; i++, inc <<= BITS_PER_MARKER)
  1604. {
  1605. unsigned marker
  1606. = (toinc_n_ptr->n >> (i * BITS_PER_MARKER)) & MARKER_MASK;
  1607. if (marker && marker != MARKER_BYTE_UNKNOWN)
  1608. toinc_n_ptr->n += inc;
  1609. }
  1610. }
  1611. else
  1612. {
  1613. n->range = n1->range;
  1614. n_start = n1;
  1615. source_stmt = source_stmt1;
  1616. }
  1617. if (!n1->alias_set
  1618. || alias_ptr_types_compatible_p (n1->alias_set, n2->alias_set))
  1619. n->alias_set = n1->alias_set;
  1620. else
  1621. n->alias_set = ptr_type_node;
  1622. n->vuse = n_start->vuse;
  1623. n->base_addr = n_start->base_addr;
  1624. n->offset = n_start->offset;
  1625. n->bytepos = n_start->bytepos;
  1626. n->type = n_start->type;
  1627. size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
  1628. for (i = 0, mask = MARKER_MASK; i < size; i++, mask <<= BITS_PER_MARKER)
  1629. {
  1630. uint64_t masked1, masked2;
  1631. masked1 = n1->n & mask;
  1632. masked2 = n2->n & mask;
  1633. if (masked1 && masked2 && masked1 != masked2)
  1634. return NULL;
  1635. }
  1636. n->n = n1->n | n2->n;
  1637. return source_stmt;
  1638. }
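/* For instance, on a little-endian target, merging the symbolic numbers of
   the two byte loads in

     uint16_t v = (uint16_t) p[0] | ((uint16_t) p[1] << 8);

   (p being an unsigned char pointer) produces a single symbolic number
   covering bytes 0..1 of *p in memory order, i.e. a plain 16-bit load in
   target endianness rather than a byte swap.  This is only a sketch of the
   simplest case.  */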
  1639. /* find_bswap_or_nop_1 invokes itself recursively with N and tries to perform
  1640. the operation given by the rhs of STMT on the result. If the operation
1641. could be executed successfully, the function returns a gimple stmt whose
1642. rhs's first tree is the expression of the source operand, and NULL
  1643. otherwise. */
  1644. static gimple
  1645. find_bswap_or_nop_1 (gimple stmt, struct symbolic_number *n, int limit)
  1646. {
  1647. enum tree_code code;
  1648. tree rhs1, rhs2 = NULL;
  1649. gimple rhs1_stmt, rhs2_stmt, source_stmt1;
  1650. enum gimple_rhs_class rhs_class;
  1651. if (!limit || !is_gimple_assign (stmt))
  1652. return NULL;
  1653. rhs1 = gimple_assign_rhs1 (stmt);
  1654. if (find_bswap_or_nop_load (stmt, rhs1, n))
  1655. return stmt;
  1656. if (TREE_CODE (rhs1) != SSA_NAME)
  1657. return NULL;
  1658. code = gimple_assign_rhs_code (stmt);
  1659. rhs_class = gimple_assign_rhs_class (stmt);
  1660. rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
  1661. if (rhs_class == GIMPLE_BINARY_RHS)
  1662. rhs2 = gimple_assign_rhs2 (stmt);
  1663. /* Handle unary rhs and binary rhs with integer constants as second
  1664. operand. */
  1665. if (rhs_class == GIMPLE_UNARY_RHS
  1666. || (rhs_class == GIMPLE_BINARY_RHS
  1667. && TREE_CODE (rhs2) == INTEGER_CST))
  1668. {
  1669. if (code != BIT_AND_EXPR
  1670. && code != LSHIFT_EXPR
  1671. && code != RSHIFT_EXPR
  1672. && code != LROTATE_EXPR
  1673. && code != RROTATE_EXPR
  1674. && !CONVERT_EXPR_CODE_P (code))
  1675. return NULL;
  1676. source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, n, limit - 1);
  1677. /* If find_bswap_or_nop_1 returned NULL, STMT is a leaf node and
  1678. we have to initialize the symbolic number. */
  1679. if (!source_stmt1)
  1680. {
  1681. if (gimple_assign_load_p (stmt)
  1682. || !init_symbolic_number (n, rhs1))
  1683. return NULL;
  1684. source_stmt1 = stmt;
  1685. }
  1686. switch (code)
  1687. {
  1688. case BIT_AND_EXPR:
  1689. {
  1690. int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
  1691. uint64_t val = int_cst_value (rhs2), mask = 0;
  1692. uint64_t tmp = (1 << BITS_PER_UNIT) - 1;
  1693. /* Only constants masking full bytes are allowed. */
  1694. for (i = 0; i < size; i++, tmp <<= BITS_PER_UNIT)
  1695. if ((val & tmp) != 0 && (val & tmp) != tmp)
  1696. return NULL;
  1697. else if (val & tmp)
  1698. mask |= (uint64_t) MARKER_MASK << (i * BITS_PER_MARKER);
  1699. n->n &= mask;
  1700. }
  1701. break;
  1702. case LSHIFT_EXPR:
  1703. case RSHIFT_EXPR:
  1704. case LROTATE_EXPR:
  1705. case RROTATE_EXPR:
  1706. if (!do_shift_rotate (code, n, (int) TREE_INT_CST_LOW (rhs2)))
  1707. return NULL;
  1708. break;
  1709. CASE_CONVERT:
  1710. {
  1711. int i, type_size, old_type_size;
  1712. tree type;
  1713. type = gimple_expr_type (stmt);
  1714. type_size = TYPE_PRECISION (type);
  1715. if (type_size % BITS_PER_UNIT != 0)
  1716. return NULL;
  1717. type_size /= BITS_PER_UNIT;
  1718. if (type_size > 64 / BITS_PER_MARKER)
  1719. return NULL;
  1720. /* Sign extension: result is dependent on the value. */
  1721. old_type_size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
  1722. if (!TYPE_UNSIGNED (n->type) && type_size > old_type_size
  1723. && HEAD_MARKER (n->n, old_type_size))
  1724. for (i = 0; i < type_size - old_type_size; i++)
  1725. n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
  1726. << ((type_size - 1 - i) * BITS_PER_MARKER);
  1727. if (type_size < 64 / BITS_PER_MARKER)
  1728. {
  1729. /* If STMT casts to a smaller type mask out the bits not
  1730. belonging to the target type. */
  1731. n->n &= ((uint64_t) 1 << (type_size * BITS_PER_MARKER)) - 1;
  1732. }
  1733. n->type = type;
  1734. if (!n->base_addr)
  1735. n->range = type_size;
  1736. }
  1737. break;
  1738. default:
  1739. return NULL;
  1740. };
  1741. return verify_symbolic_number_p (n, stmt) ? source_stmt1 : NULL;
  1742. }
  1743. /* Handle binary rhs. */
  1744. if (rhs_class == GIMPLE_BINARY_RHS)
  1745. {
  1746. struct symbolic_number n1, n2;
  1747. gimple source_stmt, source_stmt2;
  1748. if (code != BIT_IOR_EXPR)
  1749. return NULL;
  1750. if (TREE_CODE (rhs2) != SSA_NAME)
  1751. return NULL;
  1752. rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
  1753. switch (code)
  1754. {
  1755. case BIT_IOR_EXPR:
  1756. source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, &n1, limit - 1);
  1757. if (!source_stmt1)
  1758. return NULL;
  1759. source_stmt2 = find_bswap_or_nop_1 (rhs2_stmt, &n2, limit - 1);
  1760. if (!source_stmt2)
  1761. return NULL;
  1762. if (TYPE_PRECISION (n1.type) != TYPE_PRECISION (n2.type))
  1763. return NULL;
  1764. if (!n1.vuse != !n2.vuse
  1765. || (n1.vuse && !operand_equal_p (n1.vuse, n2.vuse, 0)))
  1766. return NULL;
  1767. source_stmt
  1768. = perform_symbolic_merge (source_stmt1, &n1, source_stmt2, &n2, n);
  1769. if (!source_stmt)
  1770. return NULL;
  1771. if (!verify_symbolic_number_p (n, stmt))
  1772. return NULL;
  1773. break;
  1774. default:
  1775. return NULL;
  1776. }
  1777. return source_stmt;
  1778. }
  1779. return NULL;
  1780. }
  1781. /* Check if STMT completes a bswap implementation or a read in a given
1782. endianness consisting of ORs, SHIFTs and ANDs, and set *BSWAP
1783. accordingly. It also sets N to represent the kind of operations
1784. performed: the size of the resulting expression and whether it works on
1785. a memory source, and if so its alias set and vuse. Finally, the
  1786. function returns a stmt whose rhs's first tree is the source
  1787. expression. */
  1788. static gimple
  1789. find_bswap_or_nop (gimple stmt, struct symbolic_number *n, bool *bswap)
  1790. {
  1791. /* The number which the find_bswap_or_nop_1 result should match in order
  1792. to have a full byte swap. The number is shifted to the right
  1793. according to the size of the symbolic number before using it. */
  1794. uint64_t cmpxchg = CMPXCHG;
  1795. uint64_t cmpnop = CMPNOP;
  1796. gimple source_stmt;
  1797. int limit;
1798. /* The last parameter determines the search depth limit. It usually
  1799. correlates directly to the number n of bytes to be touched. We
  1800. increase that number by log2(n) + 1 here in order to also
  1801. cover signed -> unsigned conversions of the src operand as can be seen
  1802. in libgcc, and for initial shift/and operation of the src operand. */
  1803. limit = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (gimple_expr_type (stmt)));
  1804. limit += 1 + (int) ceil_log2 ((unsigned HOST_WIDE_INT) limit);
  1805. source_stmt = find_bswap_or_nop_1 (stmt, n, limit);
  1806. if (!source_stmt)
  1807. return NULL;
  1808. /* Find real size of result (highest non-zero byte). */
  1809. if (n->base_addr)
  1810. {
  1811. int rsize;
  1812. uint64_t tmpn;
  1813. for (tmpn = n->n, rsize = 0; tmpn; tmpn >>= BITS_PER_MARKER, rsize++);
  1814. n->range = rsize;
  1815. }
  1816. /* Zero out the extra bits of N and CMP*. */
  1817. if (n->range < (int) sizeof (int64_t))
  1818. {
  1819. uint64_t mask;
  1820. mask = ((uint64_t) 1 << (n->range * BITS_PER_MARKER)) - 1;
  1821. cmpxchg >>= (64 / BITS_PER_MARKER - n->range) * BITS_PER_MARKER;
  1822. cmpnop &= mask;
  1823. }
1824. /* A complete byte swap should make the symbolic number start with
1825. the largest digit in the highest order byte. An unchanged symbolic
1826. number indicates a read with the same endianness as the target architecture. */
  1827. if (n->n == cmpnop)
  1828. *bswap = false;
  1829. else if (n->n == cmpxchg)
  1830. *bswap = true;
  1831. else
  1832. return NULL;
  1833. /* Useless bit manipulation performed by code. */
  1834. if (!n->base_addr && n->n == cmpnop)
  1835. return NULL;
  1836. n->range *= BITS_PER_UNIT;
  1837. return source_stmt;
  1838. }
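/* A typical pattern recognized by the machinery above is a hand-written
   32-bit byte swap such as

     uint32_t swap32 (uint32_t x)
     {
       return (x >> 24)
              | ((x >> 8) & 0x0000ff00)
              | ((x << 8) & 0x00ff0000)
              | (x << 24);
     }

   whose symbolic number matches CMPXCHG and is therefore replaced by a
   bswap builtin in bswap_replace below.  This is an illustration of the
   intent, not an exhaustive list of matched forms.  */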
  1839. namespace {
  1840. const pass_data pass_data_optimize_bswap =
  1841. {
  1842. GIMPLE_PASS, /* type */
  1843. "bswap", /* name */
  1844. OPTGROUP_NONE, /* optinfo_flags */
  1845. TV_NONE, /* tv_id */
  1846. PROP_ssa, /* properties_required */
  1847. 0, /* properties_provided */
  1848. 0, /* properties_destroyed */
  1849. 0, /* todo_flags_start */
  1850. 0, /* todo_flags_finish */
  1851. };
  1852. class pass_optimize_bswap : public gimple_opt_pass
  1853. {
  1854. public:
  1855. pass_optimize_bswap (gcc::context *ctxt)
  1856. : gimple_opt_pass (pass_data_optimize_bswap, ctxt)
  1857. {}
  1858. /* opt_pass methods: */
  1859. virtual bool gate (function *)
  1860. {
  1861. return flag_expensive_optimizations && optimize;
  1862. }
  1863. virtual unsigned int execute (function *);
  1864. }; // class pass_optimize_bswap
  1865. /* Perform the bswap optimization: replace the expression computed in the rhs
  1866. of CUR_STMT by an equivalent bswap, load or load + bswap expression.
1867. Which of these alternatives replaces the rhs is given by N->base_addr (non-
1868. null if a load is needed) and BSWAP. The type, VUSE and alias set of the
1869. load to perform are also given in N, while the builtin bswap invocation is
1870. given in FNDECL. Finally, if a load is involved, SRC_STMT refers to one of the
1871. load statements involved to construct the rhs in CUR_STMT and N->range gives
1872. the size of the rhs expression for maintaining some statistics.
1873. Note that if the replacement involves a load, CUR_STMT is moved just after
1874. SRC_STMT to do the load with the same VUSE, which can lead to CUR_STMT
1875. changing basic block. */
  1876. static bool
  1877. bswap_replace (gimple cur_stmt, gimple src_stmt, tree fndecl, tree bswap_type,
  1878. tree load_type, struct symbolic_number *n, bool bswap)
  1879. {
  1880. gimple_stmt_iterator gsi;
  1881. tree src, tmp, tgt;
  1882. gimple bswap_stmt;
  1883. gsi = gsi_for_stmt (cur_stmt);
  1884. src = gimple_assign_rhs1 (src_stmt);
  1885. tgt = gimple_assign_lhs (cur_stmt);
  1886. /* Need to load the value from memory first. */
  1887. if (n->base_addr)
  1888. {
  1889. gimple_stmt_iterator gsi_ins = gsi_for_stmt (src_stmt);
  1890. tree addr_expr, addr_tmp, val_expr, val_tmp;
  1891. tree load_offset_ptr, aligned_load_type;
  1892. gimple addr_stmt, load_stmt;
  1893. unsigned align;
  1894. HOST_WIDE_INT load_offset = 0;
  1895. align = get_object_alignment (src);
  1896. /* If the new access is smaller than the original one, we need
1897. to perform a big-endian adjustment. */
  1898. if (BYTES_BIG_ENDIAN)
  1899. {
  1900. HOST_WIDE_INT bitsize, bitpos;
  1901. machine_mode mode;
  1902. int unsignedp, volatilep;
  1903. tree offset;
  1904. get_inner_reference (src, &bitsize, &bitpos, &offset, &mode,
  1905. &unsignedp, &volatilep, false);
  1906. if (n->range < (unsigned HOST_WIDE_INT) bitsize)
  1907. {
  1908. load_offset = (bitsize - n->range) / BITS_PER_UNIT;
  1909. unsigned HOST_WIDE_INT l
  1910. = (load_offset * BITS_PER_UNIT) & (align - 1);
  1911. if (l)
  1912. align = l & -l;
  1913. }
  1914. }
  1915. if (bswap
  1916. && align < GET_MODE_ALIGNMENT (TYPE_MODE (load_type))
  1917. && SLOW_UNALIGNED_ACCESS (TYPE_MODE (load_type), align))
  1918. return false;
1919. /* Move cur_stmt just before one of the loads of the original
  1920. to ensure it has the same VUSE. See PR61517 for what could
  1921. go wrong. */
  1922. gsi_move_before (&gsi, &gsi_ins);
  1923. gsi = gsi_for_stmt (cur_stmt);
  1924. /* Compute address to load from and cast according to the size
  1925. of the load. */
  1926. addr_expr = build_fold_addr_expr (unshare_expr (src));
  1927. if (is_gimple_mem_ref_addr (addr_expr))
  1928. addr_tmp = addr_expr;
  1929. else
  1930. {
  1931. addr_tmp = make_temp_ssa_name (TREE_TYPE (addr_expr), NULL,
  1932. "load_src");
  1933. addr_stmt = gimple_build_assign (addr_tmp, addr_expr);
  1934. gsi_insert_before (&gsi, addr_stmt, GSI_SAME_STMT);
  1935. }
  1936. /* Perform the load. */
  1937. aligned_load_type = load_type;
  1938. if (align < TYPE_ALIGN (load_type))
  1939. aligned_load_type = build_aligned_type (load_type, align);
  1940. load_offset_ptr = build_int_cst (n->alias_set, load_offset);
  1941. val_expr = fold_build2 (MEM_REF, aligned_load_type, addr_tmp,
  1942. load_offset_ptr);
  1943. if (!bswap)
  1944. {
  1945. if (n->range == 16)
  1946. nop_stats.found_16bit++;
  1947. else if (n->range == 32)
  1948. nop_stats.found_32bit++;
  1949. else
  1950. {
  1951. gcc_assert (n->range == 64);
  1952. nop_stats.found_64bit++;
  1953. }
1954. /* Convert the result of the load if necessary. */
  1955. if (!useless_type_conversion_p (TREE_TYPE (tgt), load_type))
  1956. {
  1957. val_tmp = make_temp_ssa_name (aligned_load_type, NULL,
  1958. "load_dst");
  1959. load_stmt = gimple_build_assign (val_tmp, val_expr);
  1960. gimple_set_vuse (load_stmt, n->vuse);
  1961. gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
  1962. gimple_assign_set_rhs_with_ops (&gsi, NOP_EXPR, val_tmp);
  1963. }
  1964. else
  1965. {
  1966. gimple_assign_set_rhs_with_ops (&gsi, MEM_REF, val_expr);
  1967. gimple_set_vuse (cur_stmt, n->vuse);
  1968. }
  1969. update_stmt (cur_stmt);
  1970. if (dump_file)
  1971. {
  1972. fprintf (dump_file,
  1973. "%d bit load in target endianness found at: ",
  1974. (int) n->range);
  1975. print_gimple_stmt (dump_file, cur_stmt, 0, 0);
  1976. }
  1977. return true;
  1978. }
  1979. else
  1980. {
  1981. val_tmp = make_temp_ssa_name (aligned_load_type, NULL, "load_dst");
  1982. load_stmt = gimple_build_assign (val_tmp, val_expr);
  1983. gimple_set_vuse (load_stmt, n->vuse);
  1984. gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
  1985. }
  1986. src = val_tmp;
  1987. }
  1988. if (n->range == 16)
  1989. bswap_stats.found_16bit++;
  1990. else if (n->range == 32)
  1991. bswap_stats.found_32bit++;
  1992. else
  1993. {
  1994. gcc_assert (n->range == 64);
  1995. bswap_stats.found_64bit++;
  1996. }
  1997. tmp = src;
  1998. /* Convert the src expression if necessary. */
  1999. if (!useless_type_conversion_p (TREE_TYPE (tmp), bswap_type))
  2000. {
  2001. gimple convert_stmt;
  2002. tmp = make_temp_ssa_name (bswap_type, NULL, "bswapsrc");
  2003. convert_stmt = gimple_build_assign (tmp, NOP_EXPR, src);
  2004. gsi_insert_before (&gsi, convert_stmt, GSI_SAME_STMT);
  2005. }
2006. /* Canonical form for a 16-bit bswap is a rotate expression. Only 16-bit values
2007. are considered, as rotation of 2N-bit values by N bits is generally not
  2008. equivalent to a bswap. Consider for instance 0x01020304 r>> 16 which
  2009. gives 0x03040102 while a bswap for that value is 0x04030201. */
  2010. if (bswap && n->range == 16)
  2011. {
  2012. tree count = build_int_cst (NULL, BITS_PER_UNIT);
  2013. src = fold_build2 (LROTATE_EXPR, bswap_type, tmp, count);
  2014. bswap_stmt = gimple_build_assign (NULL, src);
  2015. }
  2016. else
  2017. bswap_stmt = gimple_build_call (fndecl, 1, tmp);
  2018. tmp = tgt;
  2019. /* Convert the result if necessary. */
  2020. if (!useless_type_conversion_p (TREE_TYPE (tgt), bswap_type))
  2021. {
  2022. gimple convert_stmt;
  2023. tmp = make_temp_ssa_name (bswap_type, NULL, "bswapdst");
  2024. convert_stmt = gimple_build_assign (tgt, NOP_EXPR, tmp);
  2025. gsi_insert_after (&gsi, convert_stmt, GSI_SAME_STMT);
  2026. }
  2027. gimple_set_lhs (bswap_stmt, tmp);
  2028. if (dump_file)
  2029. {
  2030. fprintf (dump_file, "%d bit bswap implementation found at: ",
  2031. (int) n->range);
  2032. print_gimple_stmt (dump_file, cur_stmt, 0, 0);
  2033. }
  2034. gsi_insert_after (&gsi, bswap_stmt, GSI_SAME_STMT);
  2035. gsi_remove (&gsi, true);
  2036. return true;
  2037. }
2038. /* Find manual byte swap implementations as well as loads in a given
2039. endianness. Byte swaps are turned into a bswap builtin invocation,
2040. while endian loads are converted to a bswap builtin invocation or a
2041. simple load according to the target endianness. */
  2042. unsigned int
  2043. pass_optimize_bswap::execute (function *fun)
  2044. {
  2045. basic_block bb;
  2046. bool bswap32_p, bswap64_p;
  2047. bool changed = false;
  2048. tree bswap32_type = NULL_TREE, bswap64_type = NULL_TREE;
  2049. if (BITS_PER_UNIT != 8)
  2050. return 0;
  2051. bswap32_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP32)
  2052. && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing);
  2053. bswap64_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP64)
  2054. && (optab_handler (bswap_optab, DImode) != CODE_FOR_nothing
  2055. || (bswap32_p && word_mode == SImode)));
  2056. /* Determine the argument type of the builtins. The code later on
  2057. assumes that the return and argument type are the same. */
  2058. if (bswap32_p)
  2059. {
  2060. tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
  2061. bswap32_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
  2062. }
  2063. if (bswap64_p)
  2064. {
  2065. tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
  2066. bswap64_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
  2067. }
  2068. memset (&nop_stats, 0, sizeof (nop_stats));
  2069. memset (&bswap_stats, 0, sizeof (bswap_stats));
  2070. FOR_EACH_BB_FN (bb, fun)
  2071. {
  2072. gimple_stmt_iterator gsi;
  2073. /* We do a reverse scan for bswap patterns to make sure we get the
  2074. widest match. As bswap pattern matching doesn't handle previously
  2075. inserted smaller bswap replacements as sub-patterns, the wider
  2076. variant wouldn't be detected. */
  2077. for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi);)
  2078. {
  2079. gimple src_stmt, cur_stmt = gsi_stmt (gsi);
  2080. tree fndecl = NULL_TREE, bswap_type = NULL_TREE, load_type;
  2081. enum tree_code code;
  2082. struct symbolic_number n;
  2083. bool bswap;
  2084. /* This gsi_prev (&gsi) is not part of the for loop because cur_stmt
  2085. might be moved to a different basic block by bswap_replace and gsi
2086. must not point to it if that's the case. Moving the gsi_prev
2087. there makes sure that gsi points to the statement previous to
  2088. cur_stmt while still making sure that all statements are
  2089. considered in this basic block. */
  2090. gsi_prev (&gsi);
  2091. if (!is_gimple_assign (cur_stmt))
  2092. continue;
  2093. code = gimple_assign_rhs_code (cur_stmt);
  2094. switch (code)
  2095. {
  2096. case LROTATE_EXPR:
  2097. case RROTATE_EXPR:
  2098. if (!tree_fits_uhwi_p (gimple_assign_rhs2 (cur_stmt))
  2099. || tree_to_uhwi (gimple_assign_rhs2 (cur_stmt))
  2100. % BITS_PER_UNIT)
  2101. continue;
  2102. /* Fall through. */
  2103. case BIT_IOR_EXPR:
  2104. break;
  2105. default:
  2106. continue;
  2107. }
  2108. src_stmt = find_bswap_or_nop (cur_stmt, &n, &bswap);
  2109. if (!src_stmt)
  2110. continue;
  2111. switch (n.range)
  2112. {
  2113. case 16:
  2114. /* Already in canonical form, nothing to do. */
  2115. if (code == LROTATE_EXPR || code == RROTATE_EXPR)
  2116. continue;
  2117. load_type = bswap_type = uint16_type_node;
  2118. break;
  2119. case 32:
  2120. load_type = uint32_type_node;
  2121. if (bswap32_p)
  2122. {
  2123. fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
  2124. bswap_type = bswap32_type;
  2125. }
  2126. break;
  2127. case 64:
  2128. load_type = uint64_type_node;
  2129. if (bswap64_p)
  2130. {
  2131. fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
  2132. bswap_type = bswap64_type;
  2133. }
  2134. break;
  2135. default:
  2136. continue;
  2137. }
  2138. if (bswap && !fndecl && n.range != 16)
  2139. continue;
  2140. if (bswap_replace (cur_stmt, src_stmt, fndecl, bswap_type, load_type,
  2141. &n, bswap))
  2142. changed = true;
  2143. }
  2144. }
  2145. statistics_counter_event (fun, "16-bit nop implementations found",
  2146. nop_stats.found_16bit);
  2147. statistics_counter_event (fun, "32-bit nop implementations found",
  2148. nop_stats.found_32bit);
  2149. statistics_counter_event (fun, "64-bit nop implementations found",
  2150. nop_stats.found_64bit);
  2151. statistics_counter_event (fun, "16-bit bswap implementations found",
  2152. bswap_stats.found_16bit);
  2153. statistics_counter_event (fun, "32-bit bswap implementations found",
  2154. bswap_stats.found_32bit);
  2155. statistics_counter_event (fun, "64-bit bswap implementations found",
  2156. bswap_stats.found_64bit);
  2157. return (changed ? TODO_update_ssa : 0);
  2158. }
  2159. } // anon namespace
  2160. gimple_opt_pass *
  2161. make_pass_optimize_bswap (gcc::context *ctxt)
  2162. {
  2163. return new pass_optimize_bswap (ctxt);
  2164. }
  2165. /* Return true if stmt is a type conversion operation that can be stripped
  2166. when used in a widening multiply operation. */
  2167. static bool
  2168. widening_mult_conversion_strippable_p (tree result_type, gimple stmt)
  2169. {
  2170. enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
  2171. if (TREE_CODE (result_type) == INTEGER_TYPE)
  2172. {
  2173. tree op_type;
  2174. tree inner_op_type;
  2175. if (!CONVERT_EXPR_CODE_P (rhs_code))
  2176. return false;
  2177. op_type = TREE_TYPE (gimple_assign_lhs (stmt));
  2178. /* If the type of OP has the same precision as the result, then
  2179. we can strip this conversion. The multiply operation will be
  2180. selected to create the correct extension as a by-product. */
  2181. if (TYPE_PRECISION (result_type) == TYPE_PRECISION (op_type))
  2182. return true;
  2183. /* We can also strip a conversion if it preserves the signed-ness of
  2184. the operation and doesn't narrow the range. */
  2185. inner_op_type = TREE_TYPE (gimple_assign_rhs1 (stmt));
  2186. /* If the inner-most type is unsigned, then we can strip any
  2187. intermediate widening operation. If it's signed, then the
  2188. intermediate widening operation must also be signed. */
  2189. if ((TYPE_UNSIGNED (inner_op_type)
  2190. || TYPE_UNSIGNED (op_type) == TYPE_UNSIGNED (inner_op_type))
  2191. && TYPE_PRECISION (op_type) > TYPE_PRECISION (inner_op_type))
  2192. return true;
  2193. return false;
  2194. }
  2195. return rhs_code == FIXED_CONVERT_EXPR;
  2196. }
  2197. /* Return true if RHS is a suitable operand for a widening multiplication,
  2198. assuming a target type of TYPE.
  2199. There are two cases:
  2200. - RHS makes some value at least twice as wide. Store that value
  2201. in *NEW_RHS_OUT if so, and store its type in *TYPE_OUT.
  2202. - RHS is an integer constant. Store that value in *NEW_RHS_OUT if so,
  2203. but leave *TYPE_OUT untouched. */
  2204. static bool
  2205. is_widening_mult_rhs_p (tree type, tree rhs, tree *type_out,
  2206. tree *new_rhs_out)
  2207. {
  2208. gimple stmt;
  2209. tree type1, rhs1;
  2210. if (TREE_CODE (rhs) == SSA_NAME)
  2211. {
  2212. stmt = SSA_NAME_DEF_STMT (rhs);
  2213. if (is_gimple_assign (stmt))
  2214. {
  2215. if (! widening_mult_conversion_strippable_p (type, stmt))
  2216. rhs1 = rhs;
  2217. else
  2218. {
  2219. rhs1 = gimple_assign_rhs1 (stmt);
  2220. if (TREE_CODE (rhs1) == INTEGER_CST)
  2221. {
  2222. *new_rhs_out = rhs1;
  2223. *type_out = NULL;
  2224. return true;
  2225. }
  2226. }
  2227. }
  2228. else
  2229. rhs1 = rhs;
  2230. type1 = TREE_TYPE (rhs1);
  2231. if (TREE_CODE (type1) != TREE_CODE (type)
  2232. || TYPE_PRECISION (type1) * 2 > TYPE_PRECISION (type))
  2233. return false;
  2234. *new_rhs_out = rhs1;
  2235. *type_out = type1;
  2236. return true;
  2237. }
  2238. if (TREE_CODE (rhs) == INTEGER_CST)
  2239. {
  2240. *new_rhs_out = rhs;
  2241. *type_out = NULL;
  2242. return true;
  2243. }
  2244. return false;
  2245. }
  2246. /* Return true if STMT performs a widening multiplication, assuming the
  2247. output type is TYPE. If so, store the unwidened types of the operands
  2248. in *TYPE1_OUT and *TYPE2_OUT respectively. Also fill *RHS1_OUT and
  2249. *RHS2_OUT such that converting those operands to types *TYPE1_OUT
  2250. and *TYPE2_OUT would give the operands of the multiplication. */
  2251. static bool
  2252. is_widening_mult_p (gimple stmt,
  2253. tree *type1_out, tree *rhs1_out,
  2254. tree *type2_out, tree *rhs2_out)
  2255. {
  2256. tree type = TREE_TYPE (gimple_assign_lhs (stmt));
  2257. if (TREE_CODE (type) != INTEGER_TYPE
  2258. && TREE_CODE (type) != FIXED_POINT_TYPE)
  2259. return false;
  2260. if (!is_widening_mult_rhs_p (type, gimple_assign_rhs1 (stmt), type1_out,
  2261. rhs1_out))
  2262. return false;
  2263. if (!is_widening_mult_rhs_p (type, gimple_assign_rhs2 (stmt), type2_out,
  2264. rhs2_out))
  2265. return false;
  2266. if (*type1_out == NULL)
  2267. {
  2268. if (*type2_out == NULL || !int_fits_type_p (*rhs1_out, *type2_out))
  2269. return false;
  2270. *type1_out = *type2_out;
  2271. }
  2272. if (*type2_out == NULL)
  2273. {
  2274. if (!int_fits_type_p (*rhs2_out, *type1_out))
  2275. return false;
  2276. *type2_out = *type1_out;
  2277. }
  2278. /* Ensure that the larger of the two operands comes first. */
  2279. if (TYPE_PRECISION (*type1_out) < TYPE_PRECISION (*type2_out))
  2280. {
  2281. tree tmp;
  2282. tmp = *type1_out;
  2283. *type1_out = *type2_out;
  2284. *type2_out = tmp;
  2285. tmp = *rhs1_out;
  2286. *rhs1_out = *rhs2_out;
  2287. *rhs2_out = tmp;
  2288. }
  2289. return true;
  2290. }
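/* A typical widening multiply accepted by the routine above, assuming
   32-bit int and 64-bit long long:

     long long f (int a, int b) { return (long long) a * b; }

   Both multiplication operands are at most half the width of the result,
   so convert_mult_to_widen below can turn the MULT_EXPR into a
   WIDEN_MULT_EXPR when the target provides a suitable widening multiply
   instruction.  */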
  2291. /* Process a single gimple statement STMT, which has a MULT_EXPR as
  2292. its rhs, and try to convert it into a WIDEN_MULT_EXPR. The return
  2293. value is true iff we converted the statement. */
  2294. static bool
  2295. convert_mult_to_widen (gimple stmt, gimple_stmt_iterator *gsi)
  2296. {
  2297. tree lhs, rhs1, rhs2, type, type1, type2;
  2298. enum insn_code handler;
  2299. machine_mode to_mode, from_mode, actual_mode;
  2300. optab op;
  2301. int actual_precision;
  2302. location_t loc = gimple_location (stmt);
  2303. bool from_unsigned1, from_unsigned2;
  2304. lhs = gimple_assign_lhs (stmt);
  2305. type = TREE_TYPE (lhs);
  2306. if (TREE_CODE (type) != INTEGER_TYPE)
  2307. return false;
  2308. if (!is_widening_mult_p (stmt, &type1, &rhs1, &type2, &rhs2))
  2309. return false;
  2310. to_mode = TYPE_MODE (type);
  2311. from_mode = TYPE_MODE (type1);
  2312. from_unsigned1 = TYPE_UNSIGNED (type1);
  2313. from_unsigned2 = TYPE_UNSIGNED (type2);
  2314. if (from_unsigned1 && from_unsigned2)
  2315. op = umul_widen_optab;
  2316. else if (!from_unsigned1 && !from_unsigned2)
  2317. op = smul_widen_optab;
  2318. else
  2319. op = usmul_widen_optab;
  2320. handler = find_widening_optab_handler_and_mode (op, to_mode, from_mode,
  2321. 0, &actual_mode);
  2322. if (handler == CODE_FOR_nothing)
  2323. {
  2324. if (op != smul_widen_optab)
  2325. {
  2326. /* We can use a signed multiply with unsigned types as long as
  2327. there is a wider mode to use, or it is the smaller of the two
  2328. types that is unsigned. Note that type1 >= type2, always. */
  2329. if ((TYPE_UNSIGNED (type1)
  2330. && TYPE_PRECISION (type1) == GET_MODE_PRECISION (from_mode))
  2331. || (TYPE_UNSIGNED (type2)
  2332. && TYPE_PRECISION (type2) == GET_MODE_PRECISION (from_mode)))
  2333. {
  2334. from_mode = GET_MODE_WIDER_MODE (from_mode);
  2335. if (GET_MODE_SIZE (to_mode) <= GET_MODE_SIZE (from_mode))
  2336. return false;
  2337. }
  2338. op = smul_widen_optab;
  2339. handler = find_widening_optab_handler_and_mode (op, to_mode,
  2340. from_mode, 0,
  2341. &actual_mode);
  2342. if (handler == CODE_FOR_nothing)
  2343. return false;
  2344. from_unsigned1 = from_unsigned2 = false;
  2345. }
  2346. else
  2347. return false;
  2348. }
2349. /* Ensure that the inputs to the handler are in the correct precision
  2350. for the opcode. This will be the full mode size. */
  2351. actual_precision = GET_MODE_PRECISION (actual_mode);
  2352. if (2 * actual_precision > TYPE_PRECISION (type))
  2353. return false;
  2354. if (actual_precision != TYPE_PRECISION (type1)
  2355. || from_unsigned1 != TYPE_UNSIGNED (type1))
  2356. rhs1 = build_and_insert_cast (gsi, loc,
  2357. build_nonstandard_integer_type
  2358. (actual_precision, from_unsigned1), rhs1);
  2359. if (actual_precision != TYPE_PRECISION (type2)
  2360. || from_unsigned2 != TYPE_UNSIGNED (type2))
  2361. rhs2 = build_and_insert_cast (gsi, loc,
  2362. build_nonstandard_integer_type
  2363. (actual_precision, from_unsigned2), rhs2);
  2364. /* Handle constants. */
  2365. if (TREE_CODE (rhs1) == INTEGER_CST)
  2366. rhs1 = fold_convert (type1, rhs1);
  2367. if (TREE_CODE (rhs2) == INTEGER_CST)
  2368. rhs2 = fold_convert (type2, rhs2);
  2369. gimple_assign_set_rhs1 (stmt, rhs1);
  2370. gimple_assign_set_rhs2 (stmt, rhs2);
  2371. gimple_assign_set_rhs_code (stmt, WIDEN_MULT_EXPR);
  2372. update_stmt (stmt);
  2373. widen_mul_stats.widen_mults_inserted++;
  2374. return true;
  2375. }
  2376. /* Process a single gimple statement STMT, which is found at the
  2377. iterator GSI and has a either a PLUS_EXPR or a MINUS_EXPR as its
  2378. rhs (given by CODE), and try to convert it into a
  2379. WIDEN_MULT_PLUS_EXPR or a WIDEN_MULT_MINUS_EXPR. The return value
  2380. is true iff we converted the statement. */
  2381. static bool
  2382. convert_plusminus_to_widen (gimple_stmt_iterator *gsi, gimple stmt,
  2383. enum tree_code code)
  2384. {
  2385. gimple rhs1_stmt = NULL, rhs2_stmt = NULL;
  2386. gimple conv1_stmt = NULL, conv2_stmt = NULL, conv_stmt;
  2387. tree type, type1, type2, optype;
  2388. tree lhs, rhs1, rhs2, mult_rhs1, mult_rhs2, add_rhs;
  2389. enum tree_code rhs1_code = ERROR_MARK, rhs2_code = ERROR_MARK;
  2390. optab this_optab;
  2391. enum tree_code wmult_code;
  2392. enum insn_code handler;
  2393. machine_mode to_mode, from_mode, actual_mode;
  2394. location_t loc = gimple_location (stmt);
  2395. int actual_precision;
  2396. bool from_unsigned1, from_unsigned2;
  2397. lhs = gimple_assign_lhs (stmt);
  2398. type = TREE_TYPE (lhs);
  2399. if (TREE_CODE (type) != INTEGER_TYPE
  2400. && TREE_CODE (type) != FIXED_POINT_TYPE)
  2401. return false;
  2402. if (code == MINUS_EXPR)
  2403. wmult_code = WIDEN_MULT_MINUS_EXPR;
  2404. else
  2405. wmult_code = WIDEN_MULT_PLUS_EXPR;
  2406. rhs1 = gimple_assign_rhs1 (stmt);
  2407. rhs2 = gimple_assign_rhs2 (stmt);
  2408. if (TREE_CODE (rhs1) == SSA_NAME)
  2409. {
  2410. rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
  2411. if (is_gimple_assign (rhs1_stmt))
  2412. rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
  2413. }
  2414. if (TREE_CODE (rhs2) == SSA_NAME)
  2415. {
  2416. rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
  2417. if (is_gimple_assign (rhs2_stmt))
  2418. rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
  2419. }
  2420. /* Allow for one conversion statement between the multiply
2421. and addition/subtraction statement. If there is more than
2422. one conversion then we assume they would invalidate this
  2423. transformation. If that's not the case then they should have
  2424. been folded before now. */
  2425. if (CONVERT_EXPR_CODE_P (rhs1_code))
  2426. {
  2427. conv1_stmt = rhs1_stmt;
  2428. rhs1 = gimple_assign_rhs1 (rhs1_stmt);
  2429. if (TREE_CODE (rhs1) == SSA_NAME)
  2430. {
  2431. rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
  2432. if (is_gimple_assign (rhs1_stmt))
  2433. rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
  2434. }
  2435. else
  2436. return false;
  2437. }
  2438. if (CONVERT_EXPR_CODE_P (rhs2_code))
  2439. {
  2440. conv2_stmt = rhs2_stmt;
  2441. rhs2 = gimple_assign_rhs1 (rhs2_stmt);
  2442. if (TREE_CODE (rhs2) == SSA_NAME)
  2443. {
  2444. rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
  2445. if (is_gimple_assign (rhs2_stmt))
  2446. rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
  2447. }
  2448. else
  2449. return false;
  2450. }
  2451. /* If code is WIDEN_MULT_EXPR then it would seem unnecessary to call
2452. is_widening_mult_p, but we still need the rhs values it returns.
  2453. It might also appear that it would be sufficient to use the existing
  2454. operands of the widening multiply, but that would limit the choice of
  2455. multiply-and-accumulate instructions.
2456. If the widened-multiplication result has more than one use, it is
  2457. probably wiser not to do the conversion. */
  2458. if (code == PLUS_EXPR
  2459. && (rhs1_code == MULT_EXPR || rhs1_code == WIDEN_MULT_EXPR))
  2460. {
  2461. if (!has_single_use (rhs1)
  2462. || !is_widening_mult_p (rhs1_stmt, &type1, &mult_rhs1,
  2463. &type2, &mult_rhs2))
  2464. return false;
  2465. add_rhs = rhs2;
  2466. conv_stmt = conv1_stmt;
  2467. }
  2468. else if (rhs2_code == MULT_EXPR || rhs2_code == WIDEN_MULT_EXPR)
  2469. {
  2470. if (!has_single_use (rhs2)
  2471. || !is_widening_mult_p (rhs2_stmt, &type1, &mult_rhs1,
  2472. &type2, &mult_rhs2))
  2473. return false;
  2474. add_rhs = rhs1;
  2475. conv_stmt = conv2_stmt;
  2476. }
  2477. else
  2478. return false;
  2479. to_mode = TYPE_MODE (type);
  2480. from_mode = TYPE_MODE (type1);
  2481. from_unsigned1 = TYPE_UNSIGNED (type1);
  2482. from_unsigned2 = TYPE_UNSIGNED (type2);
  2483. optype = type1;
  2484. /* There's no such thing as a mixed sign madd yet, so use a wider mode. */
  2485. if (from_unsigned1 != from_unsigned2)
  2486. {
  2487. if (!INTEGRAL_TYPE_P (type))
  2488. return false;
  2489. /* We can use a signed multiply with unsigned types as long as
  2490. there is a wider mode to use, or it is the smaller of the two
  2491. types that is unsigned. Note that type1 >= type2, always. */
  2492. if ((from_unsigned1
  2493. && TYPE_PRECISION (type1) == GET_MODE_PRECISION (from_mode))
  2494. || (from_unsigned2
  2495. && TYPE_PRECISION (type2) == GET_MODE_PRECISION (from_mode)))
  2496. {
  2497. from_mode = GET_MODE_WIDER_MODE (from_mode);
  2498. if (GET_MODE_SIZE (from_mode) >= GET_MODE_SIZE (to_mode))
  2499. return false;
  2500. }
  2501. from_unsigned1 = from_unsigned2 = false;
  2502. optype = build_nonstandard_integer_type (GET_MODE_PRECISION (from_mode),
  2503. false);
  2504. }
  2505. /* If there was a conversion between the multiply and addition
  2506. then we need to make sure it fits a multiply-and-accumulate.
2507. It should be a single mode change which does not change the
  2508. value. */
  if (conv_stmt)
    {
      /* We use the original, unmodified data types for this.  */
      tree from_type = TREE_TYPE (gimple_assign_rhs1 (conv_stmt));
      tree to_type = TREE_TYPE (gimple_assign_lhs (conv_stmt));
      int data_size = TYPE_PRECISION (type1) + TYPE_PRECISION (type2);
      bool is_unsigned = TYPE_UNSIGNED (type1) && TYPE_UNSIGNED (type2);

      if (TYPE_PRECISION (from_type) > TYPE_PRECISION (to_type))
        {
          /* Conversion is a truncate.  */
          if (TYPE_PRECISION (to_type) < data_size)
            return false;
        }
      else if (TYPE_PRECISION (from_type) < TYPE_PRECISION (to_type))
        {
          /* Conversion is an extend.  Check it's the right sort.  */
          if (TYPE_UNSIGNED (from_type) != is_unsigned
              && !(is_unsigned && TYPE_PRECISION (from_type) > data_size))
            return false;
        }
      /* else convert is a no-op for our purposes.  */
    }
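  /* Illustrative case (our own sketch): with two 8-bit operands the full
     product needs 16 bits, so a sequence in which the 16-bit product is
     zero-extended before the accumulation, roughly

       uint8_t a, b;
       uint16_t p = a * b;
       acc = acc32 + p;

     passes the check above, because the extension cannot change the value
     of the product, whereas a conversion truncating below 16 bits would
     have been rejected.  */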
  /* Verify that the machine can perform a widening multiply
     accumulate in this mode/signedness combination, otherwise
     this transformation is likely to pessimize code.  */
  this_optab = optab_for_tree_code (wmult_code, optype, optab_default);
  handler = find_widening_optab_handler_and_mode (this_optab, to_mode,
                                                  from_mode, 0, &actual_mode);

  if (handler == CODE_FOR_nothing)
    return false;

  /* Ensure that the inputs to the handler are in the correct precision
     for the opcode.  This will be the full mode size.  */
  actual_precision = GET_MODE_PRECISION (actual_mode);
  if (actual_precision != TYPE_PRECISION (type1)
      || from_unsigned1 != TYPE_UNSIGNED (type1))
    mult_rhs1 = build_and_insert_cast (gsi, loc,
                                       build_nonstandard_integer_type
                                         (actual_precision, from_unsigned1),
                                       mult_rhs1);
  if (actual_precision != TYPE_PRECISION (type2)
      || from_unsigned2 != TYPE_UNSIGNED (type2))
    mult_rhs2 = build_and_insert_cast (gsi, loc,
                                       build_nonstandard_integer_type
                                         (actual_precision, from_unsigned2),
                                       mult_rhs2);

  if (!useless_type_conversion_p (type, TREE_TYPE (add_rhs)))
    add_rhs = build_and_insert_cast (gsi, loc, type, add_rhs);

  /* Handle constants.  */
  if (TREE_CODE (mult_rhs1) == INTEGER_CST)
    mult_rhs1 = fold_convert (type1, mult_rhs1);
  if (TREE_CODE (mult_rhs2) == INTEGER_CST)
    mult_rhs2 = fold_convert (type2, mult_rhs2);

  gimple_assign_set_rhs_with_ops (gsi, wmult_code, mult_rhs1, mult_rhs2,
                                  add_rhs);
  update_stmt (gsi_stmt (*gsi));
  widen_mul_stats.maccs_inserted++;
  return true;
}
/* Combine the multiplication at MUL_STMT with operands OP1 and OP2
   with uses in additions and subtractions to form fused multiply-add
   operations.  Returns true if successful and MUL_STMT should be removed.  */
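/* For instance, at the GIMPLE level a pair of statements such as

     tmp = a * b;
     res = tmp + c;

   is rewritten so that the use becomes

     res = FMA <a, b, c>;

   after which the now-dead multiplication is removed by the caller.
   (Sketch only; the precise statements depend on the input IL.)  */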
static bool
convert_mult_to_fma (gimple mul_stmt, tree op1, tree op2)
{
  tree mul_result = gimple_get_lhs (mul_stmt);
  tree type = TREE_TYPE (mul_result);
  gimple use_stmt, neguse_stmt;
  gassign *fma_stmt;
  use_operand_p use_p;
  imm_use_iterator imm_iter;

  if (FLOAT_TYPE_P (type)
      && flag_fp_contract_mode == FP_CONTRACT_OFF)
    return false;

  /* We don't want to do bitfield reduction ops.  */
  if (INTEGRAL_TYPE_P (type)
      && (TYPE_PRECISION (type)
          != GET_MODE_PRECISION (TYPE_MODE (type))))
    return false;

  /* If the target doesn't support it, don't generate it.  We assume that
     if fma isn't available then fms, fnma or fnms are not either.  */
  if (optab_handler (fma_optab, TYPE_MODE (type)) == CODE_FOR_nothing)
    return false;

  /* If the multiplication has zero uses, it is kept around probably because
     of -fnon-call-exceptions.  Don't optimize it away in that case,
     that is DCE's job.  */
  if (has_zero_uses (mul_result))
    return false;

  /* Make sure that the multiplication statement becomes dead after
     the transformation, i.e. that all uses are transformed to FMAs.
     This means we assume that an FMA operation has the same cost
     as an addition.  */
  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, mul_result)
    {
      enum tree_code use_code;
      tree result = mul_result;
      bool negate_p = false;

      use_stmt = USE_STMT (use_p);

      if (is_gimple_debug (use_stmt))
        continue;

      /* For now restrict this operation to single basic blocks.  In theory
         we would want to support sinking the multiplication in

           m = a*b;
           if ()
             ma = m + c;
           else
             d = m;

         to form an FMA in the then block and sink the multiplication to
         the else block.  */
      if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
        return false;

      if (!is_gimple_assign (use_stmt))
        return false;

      use_code = gimple_assign_rhs_code (use_stmt);

      /* A negate on the multiplication leads to FNMA.  */
      if (use_code == NEGATE_EXPR)
        {
          ssa_op_iter iter;
          use_operand_p usep;

          result = gimple_assign_lhs (use_stmt);

          /* Make sure the negate statement becomes dead with this
             single transformation.  */
          if (!single_imm_use (gimple_assign_lhs (use_stmt),
                               &use_p, &neguse_stmt))
            return false;

          /* Make sure the multiplication isn't also used on that stmt.  */
          FOR_EACH_PHI_OR_STMT_USE (usep, neguse_stmt, iter, SSA_OP_USE)
            if (USE_FROM_PTR (usep) == mul_result)
              return false;

          /* Re-validate.  */
          use_stmt = neguse_stmt;
          if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
            return false;
          if (!is_gimple_assign (use_stmt))
            return false;

          use_code = gimple_assign_rhs_code (use_stmt);
          negate_p = true;
        }

      switch (use_code)
        {
        case MINUS_EXPR:
          if (gimple_assign_rhs2 (use_stmt) == result)
            negate_p = !negate_p;
          break;
        case PLUS_EXPR:
          break;
        default:
          /* FMA can only be formed from PLUS and MINUS.  */
          return false;
        }
      /* If the subtrahend (gimple_assign_rhs2 (use_stmt)) is computed
         by a MULT_EXPR that we'll visit later, we might be able to
         get a more profitable match with fnma.
         OTOH, if we don't, a negate / fma pair has likely lower latency
         than a mult / subtract pair.  */
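      /* E.g. for x = a*b - c*d, letting the later visit of c*d form
         FMA <-c, d, a*b> (an fnma) keeps the plain a*b multiply and
         needs no explicit negate or subtract, so we give up on
         converting this use when the target has fnma but no fms.
         (Sketch only; the actual decision is made by the checks
         below.)  */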
      if (use_code == MINUS_EXPR && !negate_p
          && gimple_assign_rhs1 (use_stmt) == result
          && optab_handler (fms_optab, TYPE_MODE (type)) == CODE_FOR_nothing
          && optab_handler (fnma_optab, TYPE_MODE (type)) != CODE_FOR_nothing)
        {
          tree rhs2 = gimple_assign_rhs2 (use_stmt);

          if (TREE_CODE (rhs2) == SSA_NAME)
            {
              gimple stmt2 = SSA_NAME_DEF_STMT (rhs2);
              if (has_single_use (rhs2)
                  && is_gimple_assign (stmt2)
                  && gimple_assign_rhs_code (stmt2) == MULT_EXPR)
                return false;
            }
        }

      /* We can't handle a * b + a * b.  */
      if (gimple_assign_rhs1 (use_stmt) == gimple_assign_rhs2 (use_stmt))
        return false;

      /* While it is possible to validate whether or not the exact form
         that we've recognized is available in the backend, the assumption
         is that the transformation is never a loss.  For instance, suppose
         the target only has the plain FMA pattern available.  Consider
         a*b-c -> fma(a,b,-c): we've exchanged MUL+SUB for FMA+NEG, which
         is still two operations.  Consider -(a*b)-c -> fma(-a,b,-c): we
         still have 3 operations, but in the FMA form the two NEGs are
         independent and could be run in parallel.  */
    }
  FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, mul_result)
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
      enum tree_code use_code;
      tree addop, mulop1 = op1, result = mul_result;
      bool negate_p = false;

      if (is_gimple_debug (use_stmt))
        continue;

      use_code = gimple_assign_rhs_code (use_stmt);
      if (use_code == NEGATE_EXPR)
        {
          result = gimple_assign_lhs (use_stmt);
          single_imm_use (gimple_assign_lhs (use_stmt), &use_p, &neguse_stmt);
          gsi_remove (&gsi, true);
          release_defs (use_stmt);

          use_stmt = neguse_stmt;
          gsi = gsi_for_stmt (use_stmt);
          use_code = gimple_assign_rhs_code (use_stmt);
          negate_p = true;
        }

      if (gimple_assign_rhs1 (use_stmt) == result)
        {
          addop = gimple_assign_rhs2 (use_stmt);
          /* a * b - c -> a * b + (-c)  */
          if (gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
            addop = force_gimple_operand_gsi (&gsi,
                                              build1 (NEGATE_EXPR,
                                                      type, addop),
                                              true, NULL_TREE, true,
                                              GSI_SAME_STMT);
        }
      else
        {
          addop = gimple_assign_rhs1 (use_stmt);
          /* a - b * c -> (-b) * c + a  */
          if (gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
            negate_p = !negate_p;
        }

      if (negate_p)
        mulop1 = force_gimple_operand_gsi (&gsi,
                                           build1 (NEGATE_EXPR,
                                                   type, mulop1),
                                           true, NULL_TREE, true,
                                           GSI_SAME_STMT);

      fma_stmt = gimple_build_assign (gimple_assign_lhs (use_stmt),
                                      FMA_EXPR, mulop1, op2, addop);
      gsi_replace (&gsi, fma_stmt, true);
      widen_mul_stats.fmas_inserted++;
    }

  return true;
}
/* Find integer multiplications where the operands are extended from
   smaller types, and replace the MULT_EXPR with a WIDEN_MULT_EXPR
   where appropriate.  */
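/* For example, on a target that advertises a widening-multiply pattern,

     short a, b;
     int c = (int) a * (int) b;

   can use a 16x16->32 WIDEN_MULT_EXPR instead of extending both operands
   and doing a full-width multiply, and an accumulation such as
   d += (int) a * (int) b may further become a WIDEN_MULT_PLUS_EXPR.
   (Illustrative only; the widths depend on the target.)  */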
namespace {

const pass_data pass_data_optimize_widening_mul =
{
  GIMPLE_PASS, /* type */
  "widening_mul", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_optimize_widening_mul : public gimple_opt_pass
{
public:
  pass_optimize_widening_mul (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_optimize_widening_mul, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      return flag_expensive_optimizations && optimize;
    }

  virtual unsigned int execute (function *);

}; // class pass_optimize_widening_mul
unsigned int
pass_optimize_widening_mul::execute (function *fun)
{
  basic_block bb;
  bool cfg_changed = false;

  memset (&widen_mul_stats, 0, sizeof (widen_mul_stats));

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple_stmt_iterator gsi;

      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi);)
        {
          gimple stmt = gsi_stmt (gsi);
          enum tree_code code;

          if (is_gimple_assign (stmt))
            {
              code = gimple_assign_rhs_code (stmt);
              switch (code)
                {
                case MULT_EXPR:
                  if (!convert_mult_to_widen (stmt, &gsi)
                      && convert_mult_to_fma (stmt,
                                              gimple_assign_rhs1 (stmt),
                                              gimple_assign_rhs2 (stmt)))
                    {
                      gsi_remove (&gsi, true);
                      release_defs (stmt);
                      continue;
                    }
                  break;

                case PLUS_EXPR:
                case MINUS_EXPR:
                  convert_plusminus_to_widen (&gsi, stmt, code);
                  break;

                default:;
                }
            }
          else if (is_gimple_call (stmt)
                   && gimple_call_lhs (stmt))
            {
              tree fndecl = gimple_call_fndecl (stmt);
              if (fndecl
                  && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
                {
                  switch (DECL_FUNCTION_CODE (fndecl))
                    {
                    case BUILT_IN_POWF:
                    case BUILT_IN_POW:
                    case BUILT_IN_POWL:
                      if (TREE_CODE (gimple_call_arg (stmt, 1)) == REAL_CST
                          && REAL_VALUES_EQUAL
                               (TREE_REAL_CST (gimple_call_arg (stmt, 1)),
                                dconst2)
                          && convert_mult_to_fma (stmt,
                                                  gimple_call_arg (stmt, 0),
                                                  gimple_call_arg (stmt, 0)))
                        {
                          unlink_stmt_vdef (stmt);
                          if (gsi_remove (&gsi, true)
                              && gimple_purge_dead_eh_edges (bb))
                            cfg_changed = true;
                          release_defs (stmt);
                          continue;
                        }
                      break;

                    default:;
                    }
                }
            }
          gsi_next (&gsi);
        }
    }

  statistics_counter_event (fun, "widening multiplications inserted",
                            widen_mul_stats.widen_mults_inserted);
  statistics_counter_event (fun, "widening maccs inserted",
                            widen_mul_stats.maccs_inserted);
  statistics_counter_event (fun, "fused multiply-adds inserted",
                            widen_mul_stats.fmas_inserted);

  return cfg_changed ? TODO_cleanup_cfg : 0;
}
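/* Note that the pow cases above feed convert_mult_to_fma with x * x, so
   e.g. y = pow (x, 2.0) + c can end up as y = FMA <x, x, c> when the
   target has an fma pattern and -ffp-contract allows contraction.  */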
} // anon namespace

gimple_opt_pass *
make_pass_optimize_widening_mul (gcc::context *ctxt)
{
  return new pass_optimize_widening_mul (ctxt);
}