match.pd 34 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059
/* Match-and-simplify patterns for shared GENERIC and GIMPLE folding.
   This file is consumed by genmatch which produces gimple-match.c
   and generic-match.c from it.

   Copyright (C) 2014-2015 Free Software Foundation, Inc.
   Contributed by Richard Biener <rguenther@suse.de>
   and Prathamesh Kulkarni <bilbotheelffriend@gmail.com>

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 3, or (at your option) any later
   version.

   GCC is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
  19. /* Generic tree predicates we inherit. */
  20. (define_predicates
  21. integer_onep integer_zerop integer_all_onesp integer_minus_onep
  22. integer_each_onep integer_truep
  23. real_zerop real_onep real_minus_onep
  24. CONSTANT_CLASS_P
  25. tree_expr_nonnegative_p)
  26. /* Operator lists. */
  27. (define_operator_list tcc_comparison
  28. lt le eq ne ge gt unordered ordered unlt unle ungt unge uneq ltgt)
  29. (define_operator_list inverted_tcc_comparison
  30. ge gt ne eq lt le ordered unordered ge gt le lt ltgt uneq)
  31. (define_operator_list inverted_tcc_comparison_with_nans
  32. unge ungt ne eq unlt unle ordered unordered ge gt le lt ltgt uneq)
  33. /* Simplifications of operations with one constant operand and
  34. simplifications to constants or single values. */
  35. (for op (plus pointer_plus minus bit_ior bit_xor)
  36. (simplify
  37. (op @0 integer_zerop)
  38. (non_lvalue @0)))
  39. /* 0 +p index -> (type)index */
  40. (simplify
  41. (pointer_plus integer_zerop @1)
  42. (non_lvalue (convert @1)))
  43. /* See if ARG1 is zero and X + ARG1 reduces to X.
  44. Likewise if the operands are reversed. */
  45. (simplify
  46. (plus:c @0 real_zerop@1)
  47. (if (fold_real_zero_addition_p (type, @1, 0))
  48. (non_lvalue @0)))
  49. /* See if ARG1 is zero and X - ARG1 reduces to X. */
  50. (simplify
  51. (minus @0 real_zerop@1)
  52. (if (fold_real_zero_addition_p (type, @1, 1))
  53. (non_lvalue @0)))
  54. /* Simplify x - x.
  55. This is unsafe for certain floats even in non-IEEE formats.
  56. In IEEE, it is unsafe because it does wrong for NaNs.
  57. Also note that operand_equal_p is always false if an operand
  58. is volatile. */
  59. (simplify
  60. (minus @0 @0)
  61. (if (!FLOAT_TYPE_P (type) || !HONOR_NANS (type))
  62. { build_zero_cst (type); }))
  63. (simplify
  64. (mult @0 integer_zerop@1)
  65. @1)
  66. /* Maybe fold x * 0 to 0. The expressions aren't the same
  67. when x is NaN, since x * 0 is also NaN. Nor are they the
  68. same in modes with signed zeros, since multiplying a
  69. negative value by 0 gives -0, not +0. */
  70. (simplify
  71. (mult @0 real_zerop@1)
  72. (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (element_mode (type)))
  73. @1))
  74. /* In IEEE floating point, x*1 is not equivalent to x for snans.
  75. Likewise for complex arithmetic with signed zeros. */
  76. (simplify
  77. (mult @0 real_onep)
  78. (if (!HONOR_SNANS (element_mode (type))
  79. && (!HONOR_SIGNED_ZEROS (element_mode (type))
  80. || !COMPLEX_FLOAT_TYPE_P (type)))
  81. (non_lvalue @0)))
  82. /* Transform x * -1.0 into -x. */
  83. (simplify
  84. (mult @0 real_minus_onep)
  85. (if (!HONOR_SNANS (element_mode (type))
  86. && (!HONOR_SIGNED_ZEROS (element_mode (type))
  87. || !COMPLEX_FLOAT_TYPE_P (type)))
  88. (negate @0)))
  89. /* Make sure to preserve divisions by zero. This is the reason why
  90. we don't simplify x / x to 1 or 0 / x to 0. */
  91. (for op (mult trunc_div ceil_div floor_div round_div exact_div)
  92. (simplify
  93. (op @0 integer_onep)
  94. (non_lvalue @0)))
  95. /* X / -1 is -X. */
  96. (for div (trunc_div ceil_div floor_div round_div exact_div)
  97. (simplify
  98. (div @0 integer_minus_onep@1)
  99. (if (!TYPE_UNSIGNED (type))
  100. (negate @0))))
  101. /* For unsigned integral types, FLOOR_DIV_EXPR is the same as
  102. TRUNC_DIV_EXPR. Rewrite into the latter in this case. */
  103. (simplify
  104. (floor_div @0 @1)
  105. (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
  106. && TYPE_UNSIGNED (type))
  107. (trunc_div @0 @1)))
  108. /* Combine two successive divisions. Note that combining ceil_div
  109. and floor_div is trickier and combining round_div even more so. */
  110. (for div (trunc_div exact_div)
  111. (simplify
  112. (div (div @0 INTEGER_CST@1) INTEGER_CST@2)
  113. (with {
  114. bool overflow_p;
  115. wide_int mul = wi::mul (@1, @2, TYPE_SIGN (type), &overflow_p);
  116. }
  117. (if (!overflow_p)
  118. (div @0 { wide_int_to_tree (type, mul); }))
  119. (if (overflow_p
  120. && (TYPE_UNSIGNED (type)
  121. || mul != wi::min_value (TYPE_PRECISION (type), SIGNED)))
  122. { build_zero_cst (type); }))))
  123. /* Optimize A / A to 1.0 if we don't care about
  124. NaNs or Infinities. */
  125. (simplify
  126. (rdiv @0 @0)
  127. (if (FLOAT_TYPE_P (type)
  128. && ! HONOR_NANS (type)
  129. && ! HONOR_INFINITIES (element_mode (type)))
  130. { build_one_cst (type); }))
  131. /* Optimize -A / A to -1.0 if we don't care about
  132. NaNs or Infinities. */
  133. (simplify
  134. (rdiv:c @0 (negate @0))
  135. (if (FLOAT_TYPE_P (type)
  136. && ! HONOR_NANS (type)
  137. && ! HONOR_INFINITIES (element_mode (type)))
  138. { build_minus_one_cst (type); }))
  139. /* In IEEE floating point, x/1 is not equivalent to x for snans. */
  140. (simplify
  141. (rdiv @0 real_onep)
  142. (if (!HONOR_SNANS (element_mode (type)))
  143. (non_lvalue @0)))
  144. /* In IEEE floating point, x/-1 is not equivalent to -x for snans. */
  145. (simplify
  146. (rdiv @0 real_minus_onep)
  147. (if (!HONOR_SNANS (element_mode (type)))
  148. (negate @0)))
  149. /* If ARG1 is a constant, we can convert this to a multiply by the
  150. reciprocal. This does not have the same rounding properties,
  151. so only do this if -freciprocal-math. We can actually
  152. always safely do it if ARG1 is a power of two, but it's hard to
  153. tell if it is or not in a portable manner. */
  154. (for cst (REAL_CST COMPLEX_CST VECTOR_CST)
  155. (simplify
  156. (rdiv @0 cst@1)
  157. (if (optimize)
  158. (if (flag_reciprocal_math
  159. && !real_zerop (@1))
  160. (with
  161. { tree tem = const_binop (RDIV_EXPR, type, build_one_cst (type), @1); }
  162. (if (tem)
  163. (mult @0 { tem; } ))))
  164. (if (cst != COMPLEX_CST)
  165. (with { tree inverse = exact_inverse (type, @1); }
  166. (if (inverse)
  167. (mult @0 { inverse; } )))))))
  168. /* Same applies to modulo operations, but fold is inconsistent here
  169. and simplifies 0 % x to 0, only preserving literal 0 % 0. */
  170. (for mod (ceil_mod floor_mod round_mod trunc_mod)
  171. /* 0 % X is always zero. */
  172. (simplify
  173. (mod integer_zerop@0 @1)
  174. /* But not for 0 % 0 so that we can get the proper warnings and errors. */
  175. (if (!integer_zerop (@1))
  176. @0))
  177. /* X % 1 is always zero. */
  178. (simplify
  179. (mod @0 integer_onep)
  180. { build_zero_cst (type); })
  181. /* X % -1 is zero. */
  182. (simplify
  183. (mod @0 integer_minus_onep@1)
  184. (if (!TYPE_UNSIGNED (type))
  185. { build_zero_cst (type); })))
  186. /* X % -C is the same as X % C. */
  187. (simplify
  188. (trunc_mod @0 INTEGER_CST@1)
  189. (if (TYPE_SIGN (type) == SIGNED
  190. && !TREE_OVERFLOW (@1)
  191. && wi::neg_p (@1)
  192. && !TYPE_OVERFLOW_TRAPS (type)
  193. /* Avoid this transformation if C is INT_MIN, i.e. C == -C. */
  194. && !sign_bit_p (@1, @1))
  195. (trunc_mod @0 (negate @1))))
  196. /* x | ~0 -> ~0 */
  197. (simplify
  198. (bit_ior @0 integer_all_onesp@1)
  199. @1)
  200. /* x & 0 -> 0 */
  201. (simplify
  202. (bit_and @0 integer_zerop@1)
  203. @1)
  204. /* x ^ x -> 0 */
  205. (simplify
  206. (bit_xor @0 @0)
  207. { build_zero_cst (type); })
  208. /* Canonicalize X ^ ~0 to ~X. */
  209. (simplify
  210. (bit_xor @0 integer_all_onesp@1)
  211. (bit_not @0))
  212. /* x & ~0 -> x */
  213. (simplify
  214. (bit_and @0 integer_all_onesp)
  215. (non_lvalue @0))
  216. /* x & x -> x, x | x -> x */
  217. (for bitop (bit_and bit_ior)
  218. (simplify
  219. (bitop @0 @0)
  220. (non_lvalue @0)))
  221. (simplify
  222. (abs (negate @0))
  223. (abs @0))
  224. (simplify
  225. (abs tree_expr_nonnegative_p@0)
  226. @0)
  227. /* Try to fold (type) X op CST -> (type) (X op ((type-x) CST))
  228. when profitable.
  229. For bitwise binary operations apply operand conversions to the
  230. binary operation result instead of to the operands. This allows
  231. to combine successive conversions and bitwise binary operations.
  232. We combine the above two cases by using a conditional convert. */
  233. (for bitop (bit_and bit_ior bit_xor)
  234. (simplify
  235. (bitop (convert @0) (convert? @1))
  236. (if (((TREE_CODE (@1) == INTEGER_CST
  237. && INTEGRAL_TYPE_P (TREE_TYPE (@0))
  238. && int_fits_type_p (@1, TREE_TYPE (@0)))
  239. || (GIMPLE && types_compatible_p (TREE_TYPE (@0), TREE_TYPE (@1)))
  240. || (GENERIC && TREE_TYPE (@0) == TREE_TYPE (@1)))
  241. /* ??? This transform conflicts with fold-const.c doing
  242. Convert (T)(x & c) into (T)x & (T)c, if c is an integer
  243. constants (if x has signed type, the sign bit cannot be set
  244. in c). This folds extension into the BIT_AND_EXPR.
  245. Restrict it to GIMPLE to avoid endless recursions. */
  246. && (bitop != BIT_AND_EXPR || GIMPLE)
  247. && (/* That's a good idea if the conversion widens the operand, thus
  248. after hoisting the conversion the operation will be narrower. */
  249. TYPE_PRECISION (TREE_TYPE (@0)) < TYPE_PRECISION (type)
  250. /* It's also a good idea if the conversion is to a non-integer
  251. mode. */
  252. || GET_MODE_CLASS (TYPE_MODE (type)) != MODE_INT
  253. /* Or if the precision of TO is not the same as the precision
  254. of its mode. */
  255. || TYPE_PRECISION (type) != GET_MODE_PRECISION (TYPE_MODE (type))))
  256. (convert (bitop @0 (convert @1))))))
  257. /* Simplify (A & B) OP0 (C & B) to (A OP0 C) & B. */
  258. (for bitop (bit_and bit_ior bit_xor)
  259. (simplify
  260. (bitop (bit_and:c @0 @1) (bit_and @2 @1))
  261. (bit_and (bitop @0 @2) @1)))
  262. /* (x | CST1) & CST2 -> (x & CST2) | (CST1 & CST2) */
  263. (simplify
  264. (bit_and (bit_ior @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
  265. (bit_ior (bit_and @0 @2) (bit_and @1 @2)))
  266. /* Combine successive equal operations with constants. */
  267. (for bitop (bit_and bit_ior bit_xor)
  268. (simplify
  269. (bitop (bitop @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
  270. (bitop @0 (bitop @1 @2))))
  271. /* Try simple folding for X op !X, and X op X with the help
  272. of the truth_valued_p and logical_inverted_value predicates. */
  273. (match truth_valued_p
  274. @0
  275. (if (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1)))
  276. (for op (tcc_comparison truth_and truth_andif truth_or truth_orif truth_xor)
  277. (match truth_valued_p
  278. (op @0 @1)))
  279. (match truth_valued_p
  280. (truth_not @0))
  281. (match (logical_inverted_value @0)
  282. (bit_not truth_valued_p@0))
  283. (match (logical_inverted_value @0)
  284. (eq @0 integer_zerop))
  285. (match (logical_inverted_value @0)
  286. (ne truth_valued_p@0 integer_truep))
  287. (match (logical_inverted_value @0)
  288. (bit_xor truth_valued_p@0 integer_truep))
  289. /* X & !X -> 0. */
  290. (simplify
  291. (bit_and:c @0 (logical_inverted_value @0))
  292. { build_zero_cst (type); })
  293. /* X | !X and X ^ !X -> 1, , if X is truth-valued. */
  294. (for op (bit_ior bit_xor)
  295. (simplify
  296. (op:c truth_valued_p@0 (logical_inverted_value @0))
  297. { constant_boolean_node (true, type); }))
  298. (for bitop (bit_and bit_ior)
  299. rbitop (bit_ior bit_and)
  300. /* (x | y) & x -> x */
  301. /* (x & y) | x -> x */
  302. (simplify
  303. (bitop:c (rbitop:c @0 @1) @0)
  304. @0)
  305. /* (~x | y) & x -> x & y */
  306. /* (~x & y) | x -> x | y */
  307. (simplify
  308. (bitop:c (rbitop:c (bit_not @0) @1) @0)
  309. (bitop @0 @1)))
  310. /* If arg1 and arg2 are booleans (or any single bit type)
  311. then try to simplify:
  312. (~X & Y) -> X < Y
  313. (X & ~Y) -> Y < X
  314. (~X | Y) -> X <= Y
  315. (X | ~Y) -> Y <= X
  316. But only do this if our result feeds into a comparison as
  317. this transformation is not always a win, particularly on
  318. targets with and-not instructions.
  319. -> simplify_bitwise_binary_boolean */
  320. (simplify
  321. (ne (bit_and:c (bit_not @0) @1) integer_zerop)
  322. (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
  323. && TYPE_PRECISION (TREE_TYPE (@1)) == 1)
  324. (lt @0 @1)))
  325. (simplify
  326. (ne (bit_ior:c (bit_not @0) @1) integer_zerop)
  327. (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
  328. && TYPE_PRECISION (TREE_TYPE (@1)) == 1)
  329. (le @0 @1)))
  330. /* ~~x -> x */
  331. (simplify
  332. (bit_not (bit_not @0))
  333. @0)
  334. /* (x & ~m) | (y & m) -> ((x ^ y) & m) ^ x */
  335. (simplify
  336. (bit_ior:c (bit_and:c@3 @0 (bit_not @2)) (bit_and:c@4 @1 @2))
  337. (if ((TREE_CODE (@3) != SSA_NAME || has_single_use (@3))
  338. && (TREE_CODE (@4) != SSA_NAME || has_single_use (@4)))
  339. (bit_xor (bit_and (bit_xor @0 @1) @2) @0)))
  340. /* Associate (p +p off1) +p off2 as (p +p (off1 + off2)). */
  341. (simplify
  342. (pointer_plus (pointer_plus@2 @0 @1) @3)
  343. (if (TREE_CODE (@2) != SSA_NAME || has_single_use (@2))
  344. (pointer_plus @0 (plus @1 @3))))
  345. /* Pattern match
  346. tem1 = (long) ptr1;
  347. tem2 = (long) ptr2;
  348. tem3 = tem2 - tem1;
  349. tem4 = (unsigned long) tem3;
  350. tem5 = ptr1 + tem4;
  351. and produce
  352. tem5 = ptr2; */
  353. (simplify
  354. (pointer_plus @0 (convert?@2 (minus@3 (convert @1) (convert @0))))
  355. /* Conditionally look through a sign-changing conversion. */
  356. (if (TYPE_PRECISION (TREE_TYPE (@2)) == TYPE_PRECISION (TREE_TYPE (@3))
  357. && ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@1)))
  358. || (GENERIC && type == TREE_TYPE (@1))))
  359. @1))
  360. /* Pattern match
  361. tem = (sizetype) ptr;
  362. tem = tem & algn;
  363. tem = -tem;
  364. ... = ptr p+ tem;
  365. and produce the simpler and easier to analyze with respect to alignment
  366. ... = ptr & ~algn; */
  367. (simplify
  368. (pointer_plus @0 (negate (bit_and (convert @0) INTEGER_CST@1)))
  369. (with { tree algn = wide_int_to_tree (TREE_TYPE (@0), wi::bit_not (@1)); }
  370. (bit_and @0 { algn; })))
  371. /* We can't reassociate at all for saturating types. */
  372. (if (!TYPE_SATURATING (type))
  373. /* Contract negates. */
  374. /* A + (-B) -> A - B */
  375. (simplify
  376. (plus:c (convert1? @0) (convert2? (negate @1)))
  377. /* Apply STRIP_NOPS on @0 and the negate. */
  378. (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
  379. && tree_nop_conversion_p (type, TREE_TYPE (@1))
  380. && !TYPE_OVERFLOW_SANITIZED (type))
  381. (minus (convert @0) (convert @1))))
  382. /* A - (-B) -> A + B */
  383. (simplify
  384. (minus (convert1? @0) (convert2? (negate @1)))
  385. (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
  386. && tree_nop_conversion_p (type, TREE_TYPE (@1))
  387. && !TYPE_OVERFLOW_SANITIZED (type))
  388. (plus (convert @0) (convert @1))))
  389. /* -(-A) -> A */
  390. (simplify
  391. (negate (convert? (negate @1)))
  392. (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
  393. && !TYPE_OVERFLOW_SANITIZED (type))
  394. (convert @1)))
  395. /* We can't reassociate floating-point or fixed-point plus or minus
  396. because of saturation to +-Inf. */
  397. (if (!FLOAT_TYPE_P (type) && !FIXED_POINT_TYPE_P (type))
  398. /* Match patterns that allow contracting a plus-minus pair
  399. irrespective of overflow issues. */
  400. /* (A +- B) - A -> +- B */
  401. /* (A +- B) -+ B -> A */
  402. /* A - (A +- B) -> -+ B */
  403. /* A +- (B -+ A) -> +- B */
  404. (simplify
  405. (minus (plus:c @0 @1) @0)
  406. @1)
  407. (simplify
  408. (minus (minus @0 @1) @0)
  409. (negate @1))
  410. (simplify
  411. (plus:c (minus @0 @1) @1)
  412. @0)
  413. (simplify
  414. (minus @0 (plus:c @0 @1))
  415. (negate @1))
  416. (simplify
  417. (minus @0 (minus @0 @1))
  418. @1)
  419. /* (A +- CST) +- CST -> A + CST */
  420. (for outer_op (plus minus)
  421. (for inner_op (plus minus)
  422. (simplify
  423. (outer_op (inner_op @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
  424. /* If the constant operation overflows we cannot do the transform
  425. as we would introduce undefined overflow, for example
  426. with (a - 1) + INT_MIN. */
  427. (with { tree cst = fold_binary (outer_op == inner_op
  428. ? PLUS_EXPR : MINUS_EXPR, type, @1, @2); }
  429. (if (cst && !TREE_OVERFLOW (cst))
  430. (inner_op @0 { cst; } ))))))
  431. /* (CST - A) +- CST -> CST - A */
  432. (for outer_op (plus minus)
  433. (simplify
  434. (outer_op (minus CONSTANT_CLASS_P@1 @0) CONSTANT_CLASS_P@2)
  435. (with { tree cst = fold_binary (outer_op, type, @1, @2); }
  436. (if (cst && !TREE_OVERFLOW (cst))
  437. (minus { cst; } @0)))))
  438. /* ~A + A -> -1 */
  439. (simplify
  440. (plus:c (bit_not @0) @0)
  441. (if (!TYPE_OVERFLOW_TRAPS (type))
  442. { build_all_ones_cst (type); }))
  443. /* ~A + 1 -> -A */
  444. (simplify
  445. (plus (convert? (bit_not @0)) integer_each_onep)
  446. (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  447. (negate (convert @0))))
  448. /* -A - 1 -> ~A */
  449. (simplify
  450. (minus (convert? (negate @0)) integer_each_onep)
  451. (if (!TYPE_OVERFLOW_TRAPS (type)
  452. && tree_nop_conversion_p (type, TREE_TYPE (@0)))
  453. (bit_not (convert @0))))
  454. /* -1 - A -> ~A */
  455. (simplify
  456. (minus integer_all_onesp @0)
  457. (if (TREE_CODE (type) != COMPLEX_TYPE)
  458. (bit_not @0)))
  459. /* (T)(P + A) - (T)P -> (T) A */
  460. (for add (plus pointer_plus)
  461. (simplify
  462. (minus (convert (add @0 @1))
  463. (convert @0))
  464. (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
  465. /* For integer types, if A has a smaller type
  466. than T the result depends on the possible
  467. overflow in P + A.
  468. E.g. T=size_t, A=(unsigned)429497295, P>0.
  469. However, if an overflow in P + A would cause
  470. undefined behavior, we can assume that there
  471. is no overflow. */
  472. || (INTEGRAL_TYPE_P (TREE_TYPE (@0))
  473. && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
  474. /* For pointer types, if the conversion of A to the
  475. final type requires a sign- or zero-extension,
  476. then we have to punt - it is not defined which
  477. one is correct. */
  478. || (POINTER_TYPE_P (TREE_TYPE (@0))
  479. && TREE_CODE (@1) == INTEGER_CST
  480. && tree_int_cst_sign_bit (@1) == 0))
  481. (convert @1))))))
  482. /* Simplifications of MIN_EXPR and MAX_EXPR. */
  483. (for minmax (min max)
  484. (simplify
  485. (minmax @0 @0)
  486. @0))
  487. (simplify
  488. (min @0 @1)
  489. (if (INTEGRAL_TYPE_P (type)
  490. && TYPE_MIN_VALUE (type)
  491. && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST))
  492. @1))
  493. (simplify
  494. (max @0 @1)
  495. (if (INTEGRAL_TYPE_P (type)
  496. && TYPE_MAX_VALUE (type)
  497. && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST))
  498. @1))
  499. /* Simplifications of shift and rotates. */
  500. (for rotate (lrotate rrotate)
  501. (simplify
  502. (rotate integer_all_onesp@0 @1)
  503. @0))
  504. /* Optimize -1 >> x for arithmetic right shifts. */
  505. (simplify
  506. (rshift integer_all_onesp@0 @1)
  507. (if (!TYPE_UNSIGNED (type)
  508. && tree_expr_nonnegative_p (@1))
  509. @0))
  510. (for shiftrotate (lrotate rrotate lshift rshift)
  511. (simplify
  512. (shiftrotate @0 integer_zerop)
  513. (non_lvalue @0))
  514. (simplify
  515. (shiftrotate integer_zerop@0 @1)
  516. @0)
  517. /* Prefer vector1 << scalar to vector1 << vector2
  518. if vector2 is uniform. */
  519. (for vec (VECTOR_CST CONSTRUCTOR)
  520. (simplify
  521. (shiftrotate @0 vec@1)
  522. (with { tree tem = uniform_vector_p (@1); }
  523. (if (tem)
  524. (shiftrotate @0 { tem; }))))))
  525. /* Rewrite an LROTATE_EXPR by a constant into an
  526. RROTATE_EXPR by a new constant. */
  527. (simplify
  528. (lrotate @0 INTEGER_CST@1)
  529. (rrotate @0 { fold_binary (MINUS_EXPR, TREE_TYPE (@1),
  530. build_int_cst (TREE_TYPE (@1),
  531. element_precision (type)), @1); }))
  532. /* ((1 << A) & 1) != 0 -> A == 0
  533. ((1 << A) & 1) == 0 -> A != 0 */
  534. (for cmp (ne eq)
  535. icmp (eq ne)
  536. (simplify
  537. (cmp (bit_and (lshift integer_onep @0) integer_onep) integer_zerop)
  538. (icmp @0 { build_zero_cst (TREE_TYPE (@0)); })))
  539. /* Simplifications of conversions. */
  540. /* Basic strip-useless-type-conversions / strip_nops. */
  541. (for cvt (convert view_convert float fix_trunc)
  542. (simplify
  543. (cvt @0)
  544. (if ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@0)))
  545. || (GENERIC && type == TREE_TYPE (@0)))
  546. @0)))
  547. /* Contract view-conversions. */
  548. (simplify
  549. (view_convert (view_convert @0))
  550. (view_convert @0))
  551. /* For integral conversions with the same precision or pointer
  552. conversions use a NOP_EXPR instead. */
  553. (simplify
  554. (view_convert @0)
  555. (if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
  556. && (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
  557. && TYPE_PRECISION (type) == TYPE_PRECISION (TREE_TYPE (@0)))
  558. (convert @0)))
  559. /* Strip inner integral conversions that do not change precision or size. */
  560. (simplify
  561. (view_convert (convert@0 @1))
  562. (if ((INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
  563. && (INTEGRAL_TYPE_P (TREE_TYPE (@1)) || POINTER_TYPE_P (TREE_TYPE (@1)))
  564. && (TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
  565. && (TYPE_SIZE (TREE_TYPE (@0)) == TYPE_SIZE (TREE_TYPE (@1))))
  566. (view_convert @1)))
  567. /* Re-association barriers around constants and other re-association
  568. barriers can be removed. */
  569. (simplify
  570. (paren CONSTANT_CLASS_P@0)
  571. @0)
  572. (simplify
  573. (paren (paren@1 @0))
  574. @1)
  575. /* Handle cases of two conversions in a row. */
  576. (for ocvt (convert float fix_trunc)
  577. (for icvt (convert float)
  578. (simplify
  579. (ocvt (icvt@1 @0))
  580. (with
  581. {
  582. tree inside_type = TREE_TYPE (@0);
  583. tree inter_type = TREE_TYPE (@1);
  584. int inside_int = INTEGRAL_TYPE_P (inside_type);
  585. int inside_ptr = POINTER_TYPE_P (inside_type);
  586. int inside_float = FLOAT_TYPE_P (inside_type);
  587. int inside_vec = VECTOR_TYPE_P (inside_type);
  588. unsigned int inside_prec = TYPE_PRECISION (inside_type);
  589. int inside_unsignedp = TYPE_UNSIGNED (inside_type);
  590. int inter_int = INTEGRAL_TYPE_P (inter_type);
  591. int inter_ptr = POINTER_TYPE_P (inter_type);
  592. int inter_float = FLOAT_TYPE_P (inter_type);
  593. int inter_vec = VECTOR_TYPE_P (inter_type);
  594. unsigned int inter_prec = TYPE_PRECISION (inter_type);
  595. int inter_unsignedp = TYPE_UNSIGNED (inter_type);
  596. int final_int = INTEGRAL_TYPE_P (type);
  597. int final_ptr = POINTER_TYPE_P (type);
  598. int final_float = FLOAT_TYPE_P (type);
  599. int final_vec = VECTOR_TYPE_P (type);
  600. unsigned int final_prec = TYPE_PRECISION (type);
  601. int final_unsignedp = TYPE_UNSIGNED (type);
  602. }
  603. /* In addition to the cases of two conversions in a row
  604. handled below, if we are converting something to its own
  605. type via an object of identical or wider precision, neither
  606. conversion is needed. */
  607. (if (((GIMPLE && useless_type_conversion_p (type, inside_type))
  608. || (GENERIC
  609. && TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (inside_type)))
  610. && (((inter_int || inter_ptr) && final_int)
  611. || (inter_float && final_float))
  612. && inter_prec >= final_prec)
  613. (ocvt @0))
  614. /* Likewise, if the intermediate and initial types are either both
  615. float or both integer, we don't need the middle conversion if the
  616. former is wider than the latter and doesn't change the signedness
  617. (for integers). Avoid this if the final type is a pointer since
  618. then we sometimes need the middle conversion. Likewise if the
  619. final type has a precision not equal to the size of its mode. */
  620. (if (((inter_int && inside_int) || (inter_float && inside_float))
  621. && (final_int || final_float)
  622. && inter_prec >= inside_prec
  623. && (inter_float || inter_unsignedp == inside_unsignedp)
  624. && ! (final_prec != GET_MODE_PRECISION (TYPE_MODE (type))
  625. && TYPE_MODE (type) == TYPE_MODE (inter_type)))
  626. (ocvt @0))
  627. /* If we have a sign-extension of a zero-extended value, we can
  628. replace that by a single zero-extension. Likewise if the
  629. final conversion does not change precision we can drop the
  630. intermediate conversion. */
  631. (if (inside_int && inter_int && final_int
  632. && ((inside_prec < inter_prec && inter_prec < final_prec
  633. && inside_unsignedp && !inter_unsignedp)
  634. || final_prec == inter_prec))
  635. (ocvt @0))
  636. /* Two conversions in a row are not needed unless:
  637. - some conversion is floating-point (overstrict for now), or
  638. - some conversion is a vector (overstrict for now), or
  639. - the intermediate type is narrower than both initial and
  640. final, or
  641. - the intermediate type and innermost type differ in signedness,
  642. and the outermost type is wider than the intermediate, or
  643. - the initial type is a pointer type and the precisions of the
  644. intermediate and final types differ, or
  645. - the final type is a pointer type and the precisions of the
  646. initial and intermediate types differ. */
  647. (if (! inside_float && ! inter_float && ! final_float
  648. && ! inside_vec && ! inter_vec && ! final_vec
  649. && (inter_prec >= inside_prec || inter_prec >= final_prec)
  650. && ! (inside_int && inter_int
  651. && inter_unsignedp != inside_unsignedp
  652. && inter_prec < final_prec)
  653. && ((inter_unsignedp && inter_prec > inside_prec)
  654. == (final_unsignedp && final_prec > inter_prec))
  655. && ! (inside_ptr && inter_prec != final_prec)
  656. && ! (final_ptr && inside_prec != inter_prec)
  657. && ! (final_prec != GET_MODE_PRECISION (TYPE_MODE (type))
  658. && TYPE_MODE (type) == TYPE_MODE (inter_type)))
  659. (ocvt @0))
  660. /* A truncation to an unsigned type (a zero-extension) should be
  661. canonicalized as bitwise and of a mask. */
  662. (if (final_int && inter_int && inside_int
  663. && final_prec == inside_prec
  664. && final_prec > inter_prec
  665. && inter_unsignedp)
  666. (convert (bit_and @0 { wide_int_to_tree
  667. (inside_type,
  668. wi::mask (inter_prec, false,
  669. TYPE_PRECISION (inside_type))); })))
  670. /* If we are converting an integer to a floating-point that can
  671. represent it exactly and back to an integer, we can skip the
  672. floating-point conversion. */
  673. (if (GIMPLE /* PR66211 */
  674. && inside_int && inter_float && final_int &&
  675. (unsigned) significand_size (TYPE_MODE (inter_type))
  676. >= inside_prec - !inside_unsignedp)
  677. (convert @0))))))
  678. /* If we have a narrowing conversion to an integral type that is fed by a
  679. BIT_AND_EXPR, we might be able to remove the BIT_AND_EXPR if it merely
  680. masks off bits outside the final type (and nothing else). */
  681. (simplify
  682. (convert (bit_and @0 INTEGER_CST@1))
  683. (if (INTEGRAL_TYPE_P (type)
  684. && INTEGRAL_TYPE_P (TREE_TYPE (@0))
  685. && TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0))
  686. && operand_equal_p (@1, build_low_bits_mask (TREE_TYPE (@1),
  687. TYPE_PRECISION (type)), 0))
  688. (convert @0)))
  689. /* (X /[ex] A) * A -> X. */
  690. (simplify
  691. (mult (convert? (exact_div @0 @1)) @1)
  692. /* Look through a sign-changing conversion. */
  693. (if (TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (type))
  694. (convert @0)))
695. /* Canonicalization of binary operations. */
696. /* Convert X + -C into X - C. */
697. (simplify
698. (plus @0 REAL_CST@1)
699. (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
/* Fold the negation of the constant up front; if that overflows and
   trapping math is enabled we must not hide the trap, so bail out.  */
700. (with { tree tem = fold_unary (NEGATE_EXPR, type, @1); }
701. (if (!TREE_OVERFLOW (tem) || !flag_trapping_math)
702. (minus @0 { tem; })))))
703. /* Convert x+x into x*2.0. */
/* Restricted to scalar floats here; x + x and x * 2.0 are bitwise
   identical for IEEE floating point.  */
704. (simplify
705. (plus @0 @0)
706. (if (SCALAR_FLOAT_TYPE_P (type))
707. (mult @0 { build_real (type, dconst2); })))
/* 0 - X -> -X for integral zero.  */
708. (simplify
709. (minus integer_zerop @1)
710. (negate @1))
711. /* (ARG0 - ARG1) is the same as (-ARG1 + ARG0). So check whether
712. ARG0 is zero and X + ARG0 reduces to X, since that would mean
713. (-ARG1 + ARG0) reduces to -ARG1. */
/* For floating point, fold_real_zero_addition_p decides whether the
   zero can be dropped (it cares about signed zeros / rounding modes).  */
714. (simplify
715. (minus real_zerop@0 @1)
716. (if (fold_real_zero_addition_p (type, @0, 0))
717. (negate @1)))
718. /* Transform x * -1 into -x. */
/* integer_minus_onep matches integral and vector all-minus-one
   constants; the negation form is the canonical one.  */
719. (simplify
720. (mult @0 integer_minus_onep)
721. (negate @0))
722. /* COMPLEX_EXPR and REALPART/IMAGPART_EXPR cancellations. */
/* Rebuilding a complex value from its own real and imaginary parts
   yields the original value.  */
723. (simplify
724. (complex (realpart @0) (imagpart @0))
725. @0)
/* Taking a part of a freshly built complex selects that operand.  */
726. (simplify
727. (realpart (complex @0 @1))
728. @0)
729. (simplify
730. (imagpart (complex @0 @1))
731. @1)
732. /* BSWAP simplifications, transforms checked by gcc.dg/builtin-bswap-8.c. */
733. (for bswap (BUILT_IN_BSWAP16 BUILT_IN_BSWAP32 BUILT_IN_BSWAP64)
/* bswap is an involution: applying it twice is the identity.  */
734. (simplify
735. (bswap (bswap @0))
736. @0)
/* Bitwise NOT acts on every bit uniformly, so it commutes with the
   byte permutation and one bswap pair cancels.  */
737. (simplify
738. (bswap (bit_not (bswap @0)))
739. (bit_not @0))
/* Likewise the bytewise-parallel ops xor/ior/and commute with bswap;
   push the remaining bswap onto the other operand.  */
740. (for bitop (bit_xor bit_ior bit_and)
741. (simplify
742. (bswap (bitop:c (bswap @0) @1))
743. (bitop @0 (bswap @1)))))
744. /* Combine COND_EXPRs and VEC_COND_EXPRs. */
745. /* Simplify constant conditions.
746. Only optimize constant conditions when the selected branch
747. has the same type as the COND_EXPR. This avoids optimizing
748. away "c ? x : throw", where the throw has a void type.
749. Note that we cannot throw away the fold-const.c variant nor
750. this one as we depend on doing this transform before possibly
751. A ? B : B -> B triggers and the fold-const.c one can optimize
752. 0 ? A : B to B even if A has side-effects. Something
753. genmatch cannot handle. */
754. (simplify
755. (cond INTEGER_CST@0 @1 @2)
/* A zero condition selects the else arm ...  */
756. (if (integer_zerop (@0)
757. && (!VOID_TYPE_P (TREE_TYPE (@2))
758. || VOID_TYPE_P (type)))
759. @2)
/* ... and any nonzero condition selects the then arm, with the same
   void-type guard as described above.  */
760. (if (!integer_zerop (@0)
761. && (!VOID_TYPE_P (TREE_TYPE (@1))
762. || VOID_TYPE_P (type)))
763. @1))
/* A constant vector mask of all-ones selects the first operand
   wholesale, all-zeros the second; partial masks are left alone.  */
764. (simplify
765. (vec_cond VECTOR_CST@0 @1 @2)
766. (if (integer_all_onesp (@0))
767. @1)
768. (if (integer_zerop (@0))
769. @2))
/* The following hold for both scalar and vector conditionals.  */
770. (for cnd (cond vec_cond)
771. /* A ? B : (A ? X : C) -> A ? B : C. */
/* First form: the nested conditional sits in the then arm,
   A ? (A ? B : X) : C -> A ? B : C.  */
772. (simplify
773. (cnd @0 (cnd @0 @1 @2) @3)
774. (cnd @0 @1 @3))
/* Second form: the nested conditional sits in the else arm,
   A ? B : (A ? X : C) -> A ? B : C.  */
775. (simplify
776. (cnd @0 @1 (cnd @0 @2 @3))
777. (cnd @0 @1 @3))
778. /* A ? B : B -> B. */
779. (simplify
780. (cnd @0 @1 @1)
781. @1)
782. /* !A ? B : C -> A ? C : B. */
783. (simplify
784. (cnd (logical_inverted_value truth_valued_p@0) @1 @2)
785. (cnd @0 @2 @1)))
786. /* Simplifications of comparisons. */
787. /* We can simplify a logical negation of a comparison to the
788. inverted comparison. As we cannot compute an expression
789. operator using invert_tree_comparison we have to simulate
790. that with expression code iteration. */
791. (for cmp (tcc_comparison)
792. icmp (inverted_tcc_comparison)
793. ncmp (inverted_tcc_comparison_with_nans)
794. /* Ideally we'd like to combine the following two patterns
795. and handle some more cases by using
796. (logical_inverted_value (cmp @0 @1))
797. here but for that genmatch would need to "inline" that.
798. For now implement what forward_propagate_comparison did. */
799. (simplify
800. (bit_not (cmp @0 @1))
/* Only single-bit (boolean) or vector results: for wider integral
   types bit_not is not a logical negation of the comparison value.  */
801. (if (VECTOR_TYPE_P (type)
802. || (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1))
803. /* Comparison inversion may be impossible for trapping math,
804. invert_tree_comparison will tell us. But we can't use
805. a computed operator in the replacement tree thus we have
806. to play the trick below. */
807. (with { enum tree_code ic = invert_tree_comparison
808. (cmp, HONOR_NANS (@0)); }
809. (if (ic == icmp)
810. (icmp @0 @1))
811. (if (ic == ncmp)
812. (ncmp @0 @1)))))
/* X ^ true is the other spelling of logical negation of a
   truth-valued comparison; handled identically to bit_not above.  */
813. (simplify
814. (bit_xor (cmp @0 @1) integer_truep)
815. (with { enum tree_code ic = invert_tree_comparison
816. (cmp, HONOR_NANS (@0)); }
817. (if (ic == icmp)
818. (icmp @0 @1))
819. (if (ic == ncmp)
820. (ncmp @0 @1)))))
821. /* Simplification of math builtins. */
/* Each operator list groups the float / double / long double variants
   of one math builtin so the patterns below iterate over all three.  */
822. (define_operator_list LOG BUILT_IN_LOGF BUILT_IN_LOG BUILT_IN_LOGL)
823. (define_operator_list EXP BUILT_IN_EXPF BUILT_IN_EXP BUILT_IN_EXPL)
824. (define_operator_list LOG2 BUILT_IN_LOG2F BUILT_IN_LOG2 BUILT_IN_LOG2L)
825. (define_operator_list EXP2 BUILT_IN_EXP2F BUILT_IN_EXP2 BUILT_IN_EXP2L)
826. (define_operator_list LOG10 BUILT_IN_LOG10F BUILT_IN_LOG10 BUILT_IN_LOG10L)
827. (define_operator_list EXP10 BUILT_IN_EXP10F BUILT_IN_EXP10 BUILT_IN_EXP10L)
828. (define_operator_list POW BUILT_IN_POWF BUILT_IN_POW BUILT_IN_POWL)
829. (define_operator_list POW10 BUILT_IN_POW10F BUILT_IN_POW10 BUILT_IN_POW10L)
830. (define_operator_list SQRT BUILT_IN_SQRTF BUILT_IN_SQRT BUILT_IN_SQRTL)
831. (define_operator_list CBRT BUILT_IN_CBRTF BUILT_IN_CBRT BUILT_IN_CBRTL)
832. /* fold_builtin_logarithm */
/* All transforms in this group are guarded by
   -funsafe-math-optimizations: they may change rounding and ignore
   errno / special-value behaviour of the math functions.  */
833. (if (flag_unsafe_math_optimizations)
834. /* Special case, optimize logN(expN(x)) = x. */
835. (for logs (LOG LOG2 LOG10)
836. exps (EXP EXP2 EXP10)
837. (simplify
838. (logs (exps @0))
839. @0))
840. /* Optimize logN(func()) for various exponential functions. We
841. want to determine the value "x" and the power "exponent" in
842. order to transform logN(x**exponent) into exponent*logN(x). */
/* Each LOG variant is paired with all four exponential builtins, hence
   the 4-way repetition in the iterator lists below.  */
843. (for logs (LOG LOG LOG LOG
844. LOG2 LOG2 LOG2 LOG2
845. LOG10 LOG10 LOG10 LOG10)
846. exps (EXP EXP2 EXP10 POW10)
847. (simplify
848. (logs (exps @0))
849. (with {
850. tree x;
/* Pick the base "x" of the matched exponential; the switch covers
   every builtin in the exps list above.  */
851. switch (exps)
852. {
853. CASE_FLT_FN (BUILT_IN_EXP):
854. /* Prepare to do logN(exp(exponent) -> exponent*logN(e). */
855. x = build_real (type, real_value_truncate (TYPE_MODE (type),
856. dconst_e ()));
857. break;
858. CASE_FLT_FN (BUILT_IN_EXP2):
859. /* Prepare to do logN(exp2(exponent) -> exponent*logN(2). */
860. x = build_real (type, dconst2);
861. break;
862. CASE_FLT_FN (BUILT_IN_EXP10):
863. CASE_FLT_FN (BUILT_IN_POW10):
864. /* Prepare to do logN(exp10(exponent) -> exponent*logN(10). */
865. {
866. REAL_VALUE_TYPE dconst10;
867. real_from_integer (&dconst10, VOIDmode, 10, SIGNED);
868. x = build_real (type, dconst10);
869. }
870. break;
871. }
872. }
873. (mult (logs { x; }) @0))))
/* logN of a root: sqrt and cbrt become constant factors 1/2 and 1/3.  */
874. (for logs (LOG LOG
875. LOG2 LOG2
876. LOG10 LOG10)
877. exps (SQRT CBRT)
878. (simplify
879. (logs (exps @0))
880. (with {
881. tree x;
882. switch (exps)
883. {
884. CASE_FLT_FN (BUILT_IN_SQRT):
885. /* Prepare to do logN(sqrt(x) -> 0.5*logN(x). */
886. x = build_real (type, dconsthalf);
887. break;
888. CASE_FLT_FN (BUILT_IN_CBRT):
889. /* Prepare to do logN(cbrt(x) -> (1/3)*logN(x). */
890. x = build_real (type, real_value_truncate (TYPE_MODE (type),
891. dconst_third ()));
892. break;
893. }
894. }
895. (mult { x; } (logs @0)))))
896. /* logN(pow(x,exponent) -> exponent*logN(x). */
897. (for logs (LOG LOG2 LOG10)
898. pows (POW)
899. (simplify
900. (logs (pows @0 @1))
901. (mult @1 (logs @0)))))
902. /* Narrowing of arithmetic and logical operations.
903. These are conceptually similar to the transformations performed for
904. the C/C++ front-ends by shorten_binary_op and shorten_compare. Long
905. term we want to move all that code out of the front-ends into here. */
906. /* If we have a narrowing conversion of an arithmetic operation where
907. both operands are widening conversions from the same type as the outer
908. narrowing conversion. Then convert the innermost operands to a suitable
909. unsigned type (to avoid introducing undefined behaviour), perform the
910. operation and convert the result to the desired type. */
911. (for op (plus minus)
912. (simplify
913. (convert (op (convert@2 @0) (convert@3 @1)))
914. (if (INTEGRAL_TYPE_P (type)
915. /* We check for type compatibility between @0 and @1 below,
916. so there's no need to check that @1/@3 are integral types. */
917. && INTEGRAL_TYPE_P (TREE_TYPE (@0))
918. && INTEGRAL_TYPE_P (TREE_TYPE (@2))
919. /* The precision of the type of each operand must match the
920. precision of the mode of each operand, similarly for the
921. result. */
922. && (TYPE_PRECISION (TREE_TYPE (@0))
923. == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (@0))))
924. && (TYPE_PRECISION (TREE_TYPE (@1))
925. == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (@1))))
926. && TYPE_PRECISION (type) == GET_MODE_PRECISION (TYPE_MODE (type))
927. /* The inner conversion must be a widening conversion. */
928. && TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0))
/* GENERIC and GIMPLE use different notions of type equality, so each
   gets its own compatibility test between @0, @1 and the result.  */
929. && ((GENERIC
930. && (TYPE_MAIN_VARIANT (TREE_TYPE (@0))
931. == TYPE_MAIN_VARIANT (TREE_TYPE (@1)))
932. && (TYPE_MAIN_VARIANT (TREE_TYPE (@0))
933. == TYPE_MAIN_VARIANT (type)))
934. || (GIMPLE
935. && types_compatible_p (TREE_TYPE (@0), TREE_TYPE (@1))
936. && types_compatible_p (TREE_TYPE (@0), type))))
/* If overflow wraps in the narrow type, the operation can be done
   there directly ...  */
937. (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
938. (convert (op @0 @1)))
/* ... otherwise route through the corresponding unsigned type so the
   transform cannot introduce signed-overflow undefined behaviour.  */
939. (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
940. (convert (op (convert:utype @0) (convert:utype @1)))))))