mcore.c

  1. /* Output routines for Motorola MCore processor
  2. Copyright (C) 1993-2015 Free Software Foundation, Inc.
  3. This file is part of GCC.
  4. GCC is free software; you can redistribute it and/or modify it
  5. under the terms of the GNU General Public License as published
  6. by the Free Software Foundation; either version 3, or (at your
  7. option) any later version.
  8. GCC is distributed in the hope that it will be useful, but WITHOUT
  9. ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
  10. or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
  11. License for more details.
  12. You should have received a copy of the GNU General Public License
  13. along with GCC; see the file COPYING3. If not see
  14. <http://www.gnu.org/licenses/>. */
  15. #include "config.h"
  16. #include "system.h"
  17. #include "coretypes.h"
  18. #include "tm.h"
  19. #include "rtl.h"
  20. #include "hash-set.h"
  21. #include "machmode.h"
  22. #include "vec.h"
  23. #include "double-int.h"
  24. #include "input.h"
  25. #include "alias.h"
  26. #include "symtab.h"
  27. #include "wide-int.h"
  28. #include "inchash.h"
  29. #include "tree.h"
  30. #include "fold-const.h"
  31. #include "stor-layout.h"
  32. #include "varasm.h"
  33. #include "stringpool.h"
  34. #include "calls.h"
  35. #include "tm_p.h"
  36. #include "mcore.h"
  37. #include "regs.h"
  38. #include "hard-reg-set.h"
  39. #include "insn-config.h"
  40. #include "conditions.h"
  41. #include "output.h"
  42. #include "insn-attr.h"
  43. #include "flags.h"
  44. #include "obstack.h"
  45. #include "hashtab.h"
  46. #include "function.h"
  47. #include "statistics.h"
  48. #include "real.h"
  49. #include "fixed-value.h"
  50. #include "expmed.h"
  51. #include "dojump.h"
  52. #include "explow.h"
  53. #include "emit-rtl.h"
  54. #include "stmt.h"
  55. #include "expr.h"
  56. #include "reload.h"
  57. #include "recog.h"
  58. #include "ggc.h"
  59. #include "diagnostic-core.h"
  60. #include "target.h"
  61. #include "target-def.h"
  62. #include "dominance.h"
  63. #include "cfg.h"
  64. #include "cfgrtl.h"
  65. #include "cfganal.h"
  66. #include "lcm.h"
  67. #include "cfgbuild.h"
  68. #include "cfgcleanup.h"
  69. #include "predict.h"
  70. #include "basic-block.h"
  71. #include "df.h"
  72. #include "builtins.h"
  73. /* For dumping information about frame sizes. */
  74. char * mcore_current_function_name = 0;
  75. long mcore_current_compilation_timestamp = 0;
  76. /* Global variables for machine-dependent things. */
  77. /* Provides the class number of the smallest class containing
  78. reg number. */
  79. const enum reg_class regno_reg_class[FIRST_PSEUDO_REGISTER] =
  80. {
  81. GENERAL_REGS, ONLYR1_REGS, LRW_REGS, LRW_REGS,
  82. LRW_REGS, LRW_REGS, LRW_REGS, LRW_REGS,
  83. LRW_REGS, LRW_REGS, LRW_REGS, LRW_REGS,
  84. LRW_REGS, LRW_REGS, LRW_REGS, GENERAL_REGS,
  85. GENERAL_REGS, C_REGS, NO_REGS, NO_REGS,
  86. };
  87. struct mcore_frame
  88. {
  89. int arg_size; /* Stdarg spills (bytes). */
  90. int reg_size; /* Non-volatile reg saves (bytes). */
  91. int reg_mask; /* Non-volatile reg saves. */
  92. int local_size; /* Locals. */
  93. int outbound_size; /* Arg overflow on calls out. */
  94. int pad_outbound;
  95. int pad_local;
  96. int pad_reg;
  97. /* Describe the steps we'll use to grow it. */
  98. #define MAX_STACK_GROWS 4 /* Gives us some spare space. */
  99. int growth[MAX_STACK_GROWS];
  100. int arg_offset;
  101. int reg_offset;
  102. int reg_growth;
  103. int local_growth;
  104. };
  105. typedef enum
  106. {
  107. COND_NO,
  108. COND_MOV_INSN,
  109. COND_CLR_INSN,
  110. COND_INC_INSN,
  111. COND_DEC_INSN,
  112. COND_BRANCH_INSN
  113. }
  114. cond_type;
  115. static void output_stack_adjust (int, int);
  116. static int calc_live_regs (int *);
  117. static int try_constant_tricks (HOST_WIDE_INT, HOST_WIDE_INT *, HOST_WIDE_INT *);
  118. static const char * output_inline_const (machine_mode, rtx *);
  119. static void layout_mcore_frame (struct mcore_frame *);
  120. static void mcore_setup_incoming_varargs (cumulative_args_t, machine_mode, tree, int *, int);
  121. static cond_type is_cond_candidate (rtx);
  122. static rtx_insn *emit_new_cond_insn (rtx, int);
  123. static rtx_insn *conditionalize_block (rtx_insn *);
  124. static void conditionalize_optimization (void);
  125. static void mcore_reorg (void);
  126. static rtx handle_structs_in_regs (machine_mode, const_tree, int);
  127. static void mcore_mark_dllexport (tree);
  128. static void mcore_mark_dllimport (tree);
  129. static int mcore_dllexport_p (tree);
  130. static int mcore_dllimport_p (tree);
  131. static tree mcore_handle_naked_attribute (tree *, tree, tree, int, bool *);
  132. #ifdef OBJECT_FORMAT_ELF
  133. static void mcore_asm_named_section (const char *,
  134. unsigned int, tree);
  135. #endif
  136. static void mcore_print_operand (FILE *, rtx, int);
  137. static void mcore_print_operand_address (FILE *, rtx);
  138. static bool mcore_print_operand_punct_valid_p (unsigned char code);
  139. static void mcore_unique_section (tree, int);
  140. static void mcore_encode_section_info (tree, rtx, int);
  141. static const char *mcore_strip_name_encoding (const char *);
  142. static int mcore_const_costs (rtx, RTX_CODE);
  143. static int mcore_and_cost (rtx);
  144. static int mcore_ior_cost (rtx);
  145. static bool mcore_rtx_costs (rtx, int, int, int,
  146. int *, bool);
  147. static void mcore_external_libcall (rtx);
  148. static bool mcore_return_in_memory (const_tree, const_tree);
  149. static int mcore_arg_partial_bytes (cumulative_args_t,
  150. machine_mode,
  151. tree, bool);
  152. static rtx mcore_function_arg (cumulative_args_t,
  153. machine_mode,
  154. const_tree, bool);
  155. static void mcore_function_arg_advance (cumulative_args_t,
  156. machine_mode,
  157. const_tree, bool);
  158. static unsigned int mcore_function_arg_boundary (machine_mode,
  159. const_tree);
  160. static void mcore_asm_trampoline_template (FILE *);
  161. static void mcore_trampoline_init (rtx, tree, rtx);
  162. static bool mcore_warn_func_return (tree);
  163. static void mcore_option_override (void);
  164. static bool mcore_legitimate_constant_p (machine_mode, rtx);
  165. /* MCore specific attributes. */
  166. static const struct attribute_spec mcore_attribute_table[] =
  167. {
  168. /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
  169. affects_type_identity } */
  170. { "dllexport", 0, 0, true, false, false, NULL, false },
  171. { "dllimport", 0, 0, true, false, false, NULL, false },
  172. { "naked", 0, 0, true, false, false, mcore_handle_naked_attribute,
  173. false },
  174. { NULL, 0, 0, false, false, false, NULL, false }
  175. };
  176. /* Initialize the GCC target structure. */
  177. #undef TARGET_ASM_EXTERNAL_LIBCALL
  178. #define TARGET_ASM_EXTERNAL_LIBCALL mcore_external_libcall
  179. #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
  180. #undef TARGET_MERGE_DECL_ATTRIBUTES
  181. #define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
  182. #endif
  183. #ifdef OBJECT_FORMAT_ELF
  184. #undef TARGET_ASM_UNALIGNED_HI_OP
  185. #define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
  186. #undef TARGET_ASM_UNALIGNED_SI_OP
  187. #define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
  188. #endif
  189. #undef TARGET_PRINT_OPERAND
  190. #define TARGET_PRINT_OPERAND mcore_print_operand
  191. #undef TARGET_PRINT_OPERAND_ADDRESS
  192. #define TARGET_PRINT_OPERAND_ADDRESS mcore_print_operand_address
  193. #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
  194. #define TARGET_PRINT_OPERAND_PUNCT_VALID_P mcore_print_operand_punct_valid_p
  195. #undef TARGET_ATTRIBUTE_TABLE
  196. #define TARGET_ATTRIBUTE_TABLE mcore_attribute_table
  197. #undef TARGET_ASM_UNIQUE_SECTION
  198. #define TARGET_ASM_UNIQUE_SECTION mcore_unique_section
  199. #undef TARGET_ASM_FUNCTION_RODATA_SECTION
  200. #define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
  201. #undef TARGET_ENCODE_SECTION_INFO
  202. #define TARGET_ENCODE_SECTION_INFO mcore_encode_section_info
  203. #undef TARGET_STRIP_NAME_ENCODING
  204. #define TARGET_STRIP_NAME_ENCODING mcore_strip_name_encoding
  205. #undef TARGET_RTX_COSTS
  206. #define TARGET_RTX_COSTS mcore_rtx_costs
  207. #undef TARGET_ADDRESS_COST
  208. #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
  209. #undef TARGET_MACHINE_DEPENDENT_REORG
  210. #define TARGET_MACHINE_DEPENDENT_REORG mcore_reorg
  211. #undef TARGET_PROMOTE_FUNCTION_MODE
  212. #define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
  213. #undef TARGET_PROMOTE_PROTOTYPES
  214. #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
  215. #undef TARGET_RETURN_IN_MEMORY
  216. #define TARGET_RETURN_IN_MEMORY mcore_return_in_memory
  217. #undef TARGET_MUST_PASS_IN_STACK
  218. #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
  219. #undef TARGET_PASS_BY_REFERENCE
  220. #define TARGET_PASS_BY_REFERENCE hook_pass_by_reference_must_pass_in_stack
  221. #undef TARGET_ARG_PARTIAL_BYTES
  222. #define TARGET_ARG_PARTIAL_BYTES mcore_arg_partial_bytes
  223. #undef TARGET_FUNCTION_ARG
  224. #define TARGET_FUNCTION_ARG mcore_function_arg
  225. #undef TARGET_FUNCTION_ARG_ADVANCE
  226. #define TARGET_FUNCTION_ARG_ADVANCE mcore_function_arg_advance
  227. #undef TARGET_FUNCTION_ARG_BOUNDARY
  228. #define TARGET_FUNCTION_ARG_BOUNDARY mcore_function_arg_boundary
  229. #undef TARGET_SETUP_INCOMING_VARARGS
  230. #define TARGET_SETUP_INCOMING_VARARGS mcore_setup_incoming_varargs
  231. #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
  232. #define TARGET_ASM_TRAMPOLINE_TEMPLATE mcore_asm_trampoline_template
  233. #undef TARGET_TRAMPOLINE_INIT
  234. #define TARGET_TRAMPOLINE_INIT mcore_trampoline_init
  235. #undef TARGET_OPTION_OVERRIDE
  236. #define TARGET_OPTION_OVERRIDE mcore_option_override
  237. #undef TARGET_LEGITIMATE_CONSTANT_P
  238. #define TARGET_LEGITIMATE_CONSTANT_P mcore_legitimate_constant_p
  239. #undef TARGET_WARN_FUNC_RETURN
  240. #define TARGET_WARN_FUNC_RETURN mcore_warn_func_return
  241. struct gcc_target targetm = TARGET_INITIALIZER;
  242. /* Adjust the stack pointer by DIRECTION * SIZE bytes; a negative DIRECTION extends the (downward-growing) stack. */
  243. static void
  244. output_stack_adjust (int direction, int size)
  245. {
  246. /* If extending stack a lot, we do it incrementally. */
  247. if (direction < 0 && size > mcore_stack_increment && mcore_stack_increment > 0)
  248. {
  249. rtx tmp = gen_rtx_REG (SImode, 1);
  250. rtx memref;
  251. emit_insn (gen_movsi (tmp, GEN_INT (mcore_stack_increment)));
  252. do
  253. {
  254. emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp));
  255. memref = gen_rtx_MEM (SImode, stack_pointer_rtx);
  256. MEM_VOLATILE_P (memref) = 1;
  257. emit_insn (gen_movsi (memref, stack_pointer_rtx));
  258. size -= mcore_stack_increment;
  259. }
  260. while (size > mcore_stack_increment);
  261. /* SIZE is now the residual for the last adjustment,
  262. which doesn't require a probe. */
  263. }
  264. if (size)
  265. {
  266. rtx insn;
  267. rtx val = GEN_INT (size);
  268. if (size > 32)
  269. {
  270. rtx nval = gen_rtx_REG (SImode, 1);
  271. emit_insn (gen_movsi (nval, val));
  272. val = nval;
  273. }
  274. if (direction > 0)
  275. insn = gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, val);
  276. else
  277. insn = gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, val);
  278. emit_insn (insn);
  279. }
  280. }
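/* For instance, if mcore_stack_increment is 0x1000 and the prologue must
   extend the stack by 0x2300 bytes, the loop above emits two 0x1000-byte
   decrements of the stack pointer, each followed by a volatile store of
   sp at the new sp (touching each page so the OS can grow the stack); the
   remaining 0x300 bytes are then subtracted via register r1, since a
   residual larger than 32 cannot be used as an immediate.  */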
  281. /* Work out the registers which need to be saved,
  282. both as a mask and a count. */
  283. static int
  284. calc_live_regs (int * count)
  285. {
  286. int reg;
  287. int live_regs_mask = 0;
  288. * count = 0;
  289. for (reg = 0; reg < FIRST_PSEUDO_REGISTER; reg++)
  290. {
  291. if (df_regs_ever_live_p (reg) && !call_used_regs[reg])
  292. {
  293. (*count)++;
  294. live_regs_mask |= (1 << reg);
  295. }
  296. }
  297. return live_regs_mask;
  298. }
  299. /* Print the operand address in x to the stream. */
  300. static void
  301. mcore_print_operand_address (FILE * stream, rtx x)
  302. {
  303. switch (GET_CODE (x))
  304. {
  305. case REG:
  306. fprintf (stream, "(%s)", reg_names[REGNO (x)]);
  307. break;
  308. case PLUS:
  309. {
  310. rtx base = XEXP (x, 0);
  311. rtx index = XEXP (x, 1);
  312. if (GET_CODE (base) != REG)
  313. {
  314. /* Ensure that BASE is a register (one of them must be). */
  315. rtx temp = base;
  316. base = index;
  317. index = temp;
  318. }
  319. switch (GET_CODE (index))
  320. {
  321. case CONST_INT:
  322. fprintf (stream, "(%s," HOST_WIDE_INT_PRINT_DEC ")",
  323. reg_names[REGNO(base)], INTVAL (index));
  324. break;
  325. default:
  326. gcc_unreachable ();
  327. }
  328. }
  329. break;
  330. default:
  331. output_addr_const (stream, x);
  332. break;
  333. }
  334. }
  335. static bool
  336. mcore_print_operand_punct_valid_p (unsigned char code)
  337. {
  338. return (code == '.' || code == '#' || code == '*' || code == '^'
  339. || code == '!');
  340. }
  341. /* Print operand x (an rtx) in assembler syntax to file stream
  342. according to modifier code.
  343. 'R' print the next register or memory location along, i.e. the lsw in
  344. a double word value
  345. 'O' print a constant without the #
  346. 'M' print a constant as its negative
  347. 'P' print log2 of a power of two
  348. 'Q' print log2 of an inverse of a power of two
  349. 'U' print register for ldm/stm instruction
  350. 'X' print byte number for xtrbN instruction
  'N' print 32 for the constant -1, otherwise log2 of (the constant + 1), i.e. the bmaski width
  'x' print a constant in hexadecimal. */
  351. static void
  352. mcore_print_operand (FILE * stream, rtx x, int code)
  353. {
  354. switch (code)
  355. {
  356. case 'N':
  357. if (INTVAL(x) == -1)
  358. fprintf (asm_out_file, "32");
  359. else
  360. fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x) + 1));
  361. break;
  362. case 'P':
  363. fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x) & 0xffffffff));
  364. break;
  365. case 'Q':
  366. fprintf (asm_out_file, "%d", exact_log2 (~INTVAL (x)));
  367. break;
  368. case 'O':
  369. fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
  370. break;
  371. case 'M':
  372. fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, - INTVAL (x));
  373. break;
  374. case 'R':
  375. /* Next location along in memory or register. */
  376. switch (GET_CODE (x))
  377. {
  378. case REG:
  379. fputs (reg_names[REGNO (x) + 1], (stream));
  380. break;
  381. case MEM:
  382. mcore_print_operand_address
  383. (stream, XEXP (adjust_address (x, SImode, 4), 0));
  384. break;
  385. default:
  386. gcc_unreachable ();
  387. }
  388. break;
  389. case 'U':
  390. fprintf (asm_out_file, "%s-%s", reg_names[REGNO (x)],
  391. reg_names[REGNO (x) + 3]);
  392. break;
  393. case 'x':
  394. fprintf (asm_out_file, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
  395. break;
  396. case 'X':
  397. fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, 3 - INTVAL (x) / 8);
  398. break;
  399. default:
  400. switch (GET_CODE (x))
  401. {
  402. case REG:
  403. fputs (reg_names[REGNO (x)], (stream));
  404. break;
  405. case MEM:
  406. output_address (XEXP (x, 0));
  407. break;
  408. default:
  409. output_addr_const (stream, x);
  410. break;
  411. }
  412. break;
  413. }
  414. }
  415. /* What does a constant cost ? */
  416. static int
  417. mcore_const_costs (rtx exp, enum rtx_code code)
  418. {
  419. HOST_WIDE_INT val = INTVAL (exp);
  420. /* Easy constants. */
  421. if ( CONST_OK_FOR_I (val)
  422. || CONST_OK_FOR_M (val)
  423. || CONST_OK_FOR_N (val)
  424. || (code == PLUS && CONST_OK_FOR_L (val)))
  425. return 1;
  426. else if (code == AND
  427. && ( CONST_OK_FOR_M (~val)
  428. || CONST_OK_FOR_N (~val)))
  429. return 2;
  430. else if (code == PLUS
  431. && ( CONST_OK_FOR_I (-val)
  432. || CONST_OK_FOR_M (-val)
  433. || CONST_OK_FOR_N (-val)))
  434. return 2;
  435. return 5;
  436. }
  437. /* What does an AND instruction cost?  We compute this because immediates
  438. may have been relaxed.  We want to ensure that CSE will CSE relaxed
  439. immediates out; otherwise we'll get bad code (multiple reloads of the same constant). */
  440. static int
  441. mcore_and_cost (rtx x)
  442. {
  443. HOST_WIDE_INT val;
  444. if (GET_CODE (XEXP (x, 1)) != CONST_INT)
  445. return 2;
  446. val = INTVAL (XEXP (x, 1));
  447. /* Do it directly. */
  448. if (CONST_OK_FOR_K (val) || CONST_OK_FOR_M (~val))
  449. return 2;
  450. /* Takes one instruction to load. */
  451. else if (const_ok_for_mcore (val))
  452. return 3;
  453. /* Takes two instructions to load. */
  454. else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
  455. return 4;
  456. /* Takes a lrw to load. */
  457. return 5;
  458. }
  459. /* What does an OR cost?  See mcore_and_cost () above. */
  460. static int
  461. mcore_ior_cost (rtx x)
  462. {
  463. HOST_WIDE_INT val;
  464. if (GET_CODE (XEXP (x, 1)) != CONST_INT)
  465. return 2;
  466. val = INTVAL (XEXP (x, 1));
  467. /* Do it directly with bclri. */
  468. if (CONST_OK_FOR_M (val))
  469. return 2;
  470. /* Takes one instruction to load. */
  471. else if (const_ok_for_mcore (val))
  472. return 3;
  473. /* Takes two instructions to load. */
  474. else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
  475. return 4;
  476. /* Takes a lrw to load. */
  477. return 5;
  478. }
  479. static bool
  480. mcore_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
  481. int * total, bool speed ATTRIBUTE_UNUSED)
  482. {
  483. switch (code)
  484. {
  485. case CONST_INT:
  486. *total = mcore_const_costs (x, (enum rtx_code) outer_code);
  487. return true;
  488. case CONST:
  489. case LABEL_REF:
  490. case SYMBOL_REF:
  491. *total = 5;
  492. return true;
  493. case CONST_DOUBLE:
  494. *total = 10;
  495. return true;
  496. case AND:
  497. *total = COSTS_N_INSNS (mcore_and_cost (x));
  498. return true;
  499. case IOR:
  500. *total = COSTS_N_INSNS (mcore_ior_cost (x));
  501. return true;
  502. case DIV:
  503. case UDIV:
  504. case MOD:
  505. case UMOD:
  506. case FLOAT:
  507. case FIX:
  508. *total = COSTS_N_INSNS (100);
  509. return true;
  510. default:
  511. return false;
  512. }
  513. }
  514. /* Prepare the operands for a comparison. Return whether the branch/setcc
  515. should reverse the operands. */
  516. bool
  517. mcore_gen_compare (enum rtx_code code, rtx op0, rtx op1)
  518. {
  519. rtx cc_reg = gen_rtx_REG (CCmode, CC_REG);
  520. bool invert;
  521. if (GET_CODE (op1) == CONST_INT)
  522. {
  523. HOST_WIDE_INT val = INTVAL (op1);
  524. switch (code)
  525. {
  526. case GTU:
  527. /* Unsigned > 0 is the same as != 0; everything else is converted
  528. below to LEU (reversed cmphs). */
  529. if (val == 0)
  530. code = NE;
  531. break;
  532. /* Check whether (LE A imm) can become (LT A imm + 1),
  533. or (GT A imm) can become (GE A imm + 1). */
  534. case GT:
  535. case LE:
  536. if (CONST_OK_FOR_J (val + 1))
  537. {
  538. op1 = GEN_INT (val + 1);
  539. code = code == LE ? LT : GE;
  540. }
  541. break;
  542. default:
  543. break;
  544. }
  545. }
  546. if (CONSTANT_P (op1) && GET_CODE (op1) != CONST_INT)
  547. op1 = force_reg (SImode, op1);
  548. /* cmpnei: 0-31 (K immediate)
  549. cmplti: 1-32 (J immediate, 0 using btsti x,31). */
  550. invert = false;
  551. switch (code)
  552. {
  553. case EQ: /* Use inverted condition, cmpne. */
  554. code = NE;
  555. invert = true;
  556. /* Drop through. */
  557. case NE: /* Use normal condition, cmpne. */
  558. if (GET_CODE (op1) == CONST_INT && ! CONST_OK_FOR_K (INTVAL (op1)))
  559. op1 = force_reg (SImode, op1);
  560. break;
  561. case LE: /* Use inverted condition, reversed cmplt. */
  562. code = GT;
  563. invert = true;
  564. /* Drop through. */
  565. case GT: /* Use normal condition, reversed cmplt. */
  566. if (GET_CODE (op1) == CONST_INT)
  567. op1 = force_reg (SImode, op1);
  568. break;
  569. case GE: /* Use inverted condition, cmplt. */
  570. code = LT;
  571. invert = true;
  572. /* Drop through. */
  573. case LT: /* Use normal condition, cmplt. */
  574. if (GET_CODE (op1) == CONST_INT &&
  575. /* covered by btsti x,31. */
  576. INTVAL (op1) != 0 &&
  577. ! CONST_OK_FOR_J (INTVAL (op1)))
  578. op1 = force_reg (SImode, op1);
  579. break;
  580. case GTU: /* Use inverted condition, cmple. */
  581. /* We coped with unsigned > 0 above. */
  582. gcc_assert (GET_CODE (op1) != CONST_INT || INTVAL (op1) != 0);
  583. code = LEU;
  584. invert = true;
  585. /* Drop through. */
  586. case LEU: /* Use normal condition, reversed cmphs. */
  587. if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
  588. op1 = force_reg (SImode, op1);
  589. break;
  590. case LTU: /* Use inverted condition, cmphs. */
  591. code = GEU;
  592. invert = true;
  593. /* Drop through. */
  594. case GEU: /* Use normal condition, cmphs. */
  595. if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
  596. op1 = force_reg (SImode, op1);
  597. break;
  598. default:
  599. break;
  600. }
  601. emit_insn (gen_rtx_SET (VOIDmode,
  602. cc_reg,
  603. gen_rtx_fmt_ee (code, CCmode, op0, op1)));
  604. return invert;
  605. }
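/* For example, "r2 <= 10" is first rewritten above as "r2 < 11" (11 is in
   the 1-32 J-immediate range), so a single cmplti sets the condition bit
   and INVERT is false.  An equality test "a == b", by contrast, is emitted
   as cmpne a,b with INVERT returned as true, so the branch or conditional
   move that consumes the result uses the opposite sense of the C bit.  */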
  606. int
  607. mcore_symbolic_address_p (rtx x)
  608. {
  609. switch (GET_CODE (x))
  610. {
  611. case SYMBOL_REF:
  612. case LABEL_REF:
  613. return 1;
  614. case CONST:
  615. x = XEXP (x, 0);
  616. return ( (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
  617. || GET_CODE (XEXP (x, 0)) == LABEL_REF)
  618. && GET_CODE (XEXP (x, 1)) == CONST_INT);
  619. default:
  620. return 0;
  621. }
  622. }
  623. /* Functions to output assembly code for a function call. */
  624. char *
  625. mcore_output_call (rtx operands[], int index)
  626. {
  627. static char buffer[20];
  628. rtx addr = operands [index];
  629. if (REG_P (addr))
  630. {
  631. if (TARGET_CG_DATA)
  632. {
  633. gcc_assert (mcore_current_function_name);
  634. ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
  635. "unknown", 1);
  636. }
  637. sprintf (buffer, "jsr\t%%%d", index);
  638. }
  639. else
  640. {
  641. if (TARGET_CG_DATA)
  642. {
  643. gcc_assert (mcore_current_function_name);
  644. gcc_assert (GET_CODE (addr) == SYMBOL_REF);
  645. ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
  646. XSTR (addr, 0), 0);
  647. }
  648. sprintf (buffer, "jbsr\t%%%d", index);
  649. }
  650. return buffer;
  651. }
  652. /* Can we load a constant with a single instruction ? */
  653. int
  654. const_ok_for_mcore (HOST_WIDE_INT value)
  655. {
  656. if (value >= 0 && value <= 127)
  657. return 1;
  658. /* Try exact power of two. */
  659. if (CONST_OK_FOR_M (value))
  660. return 1;
  661. /* Try exact power of two - 1. */
  662. if (CONST_OK_FOR_N (value) && value != -1)
  663. return 1;
  664. return 0;
  665. }
  666. /* Can we load a constant inline with up to 2 instructions ? */
  667. int
  668. mcore_const_ok_for_inline (HOST_WIDE_INT value)
  669. {
  670. HOST_WIDE_INT x, y;
  671. return try_constant_tricks (value, & x, & y) > 0;
  672. }
  673. /* Are we loading the constant using a not ? */
  674. int
  675. mcore_const_trick_uses_not (HOST_WIDE_INT value)
  676. {
  677. HOST_WIDE_INT x, y;
  678. return try_constant_tricks (value, & x, & y) == 2;
  679. }
  680. /* Try tricks to load a constant inline and return the trick number on
  681. success (0 means the constant is not inlinable).
  682. 0: not inlinable
  683. 1: single instruction (do the usual thing)
  684. 2: single insn followed by a 'not'
  685. 3: single insn followed by a subi
  686. 4: single insn followed by an addi
  687. 5: single insn followed by rsubi
  688. 6: single insn followed by bseti
  689. 7: single insn followed by bclri
  690. 8: single insn followed by rotli
  691. 9: single insn followed by lsli
  692. 10: single insn followed by ixh
  693. 11: single insn followed by ixw. */
  694. static int
  695. try_constant_tricks (HOST_WIDE_INT value, HOST_WIDE_INT * x, HOST_WIDE_INT * y)
  696. {
  697. HOST_WIDE_INT i;
  698. unsigned HOST_WIDE_INT bit, shf, rot;
  699. if (const_ok_for_mcore (value))
  700. return 1; /* Do the usual thing. */
  701. if (! TARGET_HARDLIT)
  702. return 0;
  703. if (const_ok_for_mcore (~value))
  704. {
  705. *x = ~value;
  706. return 2;
  707. }
  708. for (i = 1; i <= 32; i++)
  709. {
  710. if (const_ok_for_mcore (value - i))
  711. {
  712. *x = value - i;
  713. *y = i;
  714. return 3;
  715. }
  716. if (const_ok_for_mcore (value + i))
  717. {
  718. *x = value + i;
  719. *y = i;
  720. return 4;
  721. }
  722. }
  723. bit = 0x80000000ULL;
  724. for (i = 0; i <= 31; i++)
  725. {
  726. if (const_ok_for_mcore (i - value))
  727. {
  728. *x = i - value;
  729. *y = i;
  730. return 5;
  731. }
  732. if (const_ok_for_mcore (value & ~bit))
  733. {
  734. *y = bit;
  735. *x = value & ~bit;
  736. return 6;
  737. }
  738. if (const_ok_for_mcore (value | bit))
  739. {
  740. *y = ~bit;
  741. *x = value | bit;
  742. return 7;
  743. }
  744. bit >>= 1;
  745. }
  746. shf = value;
  747. rot = value;
  748. for (i = 1; i < 31; i++)
  749. {
  750. int c;
  751. /* MCore has rotate left. */
  752. c = rot << 31;
  753. rot >>= 1;
  754. rot &= 0x7FFFFFFF;
  755. rot |= c; /* Simulate rotate. */
  756. if (const_ok_for_mcore (rot))
  757. {
  758. *y = i;
  759. *x = rot;
  760. return 8;
  761. }
  762. if (shf & 1)
  763. shf = 0; /* Can't use logical shift, low order bit is one. */
  764. shf >>= 1;
  765. if (shf != 0 && const_ok_for_mcore (shf))
  766. {
  767. *y = i;
  768. *x = shf;
  769. return 9;
  770. }
  771. }
  772. if ((value % 3) == 0 && const_ok_for_mcore (value / 3))
  773. {
  774. *x = value / 3;
  775. return 10;
  776. }
  777. if ((value % 5) == 0 && const_ok_for_mcore (value / 5))
  778. {
  779. *x = value / 5;
  780. return 11;
  781. }
  782. return 0;
  783. }
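/* Two illustrative cases (assuming TARGET_HARDLIT, with the mnemonics
   taken from output_inline_const below): -256 is not loadable directly,
   but ~(-256) = 255 = 2^8 - 1 is, so trick 2 applies and the constant is
   synthesized as "bmaski rd,8" followed by "not rd".  0x80000040 fails
   the direct, not, addi and subi checks, but clearing bit 31 leaves 64,
   so trick 6 applies: "movi rd,64" followed by "bseti rd,31".  */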
  784. /* Check whether reg is dead at first. This is done by searching ahead
  785. for either the next use (i.e., reg is live), a death note, or a set of
  786. reg. Don't just use dead_or_set_p() since reload does not always mark
  787. deaths (especially if PRESERVE_DEATH_NOTES_REGNO_P is not defined). We
  788. can ignore subregs by extracting the actual register. BRC */
  789. int
  790. mcore_is_dead (rtx_insn *first, rtx reg)
  791. {
  792. rtx_insn *insn;
  793. /* For mcore, subregs can't live independently of their parent regs. */
  794. if (GET_CODE (reg) == SUBREG)
  795. reg = SUBREG_REG (reg);
  796. /* Dies immediately. */
  797. if (dead_or_set_p (first, reg))
  798. return 1;
  799. /* Look for conclusive evidence of live/death, otherwise we have
  800. to assume that it is live. */
  801. for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
  802. {
  803. if (JUMP_P (insn))
  804. return 0; /* We lose track, assume it is alive. */
  805. else if (CALL_P (insn))
  806. {
  807. /* Calls might use it for the target or for register parms. */
  808. if (reg_referenced_p (reg, PATTERN (insn))
  809. || find_reg_fusage (insn, USE, reg))
  810. return 0;
  811. else if (dead_or_set_p (insn, reg))
  812. return 1;
  813. }
  814. else if (NONJUMP_INSN_P (insn))
  815. {
  816. if (reg_referenced_p (reg, PATTERN (insn)))
  817. return 0;
  818. else if (dead_or_set_p (insn, reg))
  819. return 1;
  820. }
  821. }
  822. /* No conclusive evidence either way, we cannot take the chance
  823. that control flow hid the use from us -- "I'm not dead yet". */
  824. return 0;
  825. }
  826. /* Count the number of ones in mask. */
  827. int
  828. mcore_num_ones (HOST_WIDE_INT mask)
  829. {
  830. /* A trick to count set bits recently posted on comp.compilers. */
  831. mask = (mask >> 1 & 0x55555555) + (mask & 0x55555555);
  832. mask = ((mask >> 2) & 0x33333333) + (mask & 0x33333333);
  833. mask = ((mask >> 4) + mask) & 0x0f0f0f0f;
  834. mask = ((mask >> 8) + mask);
  835. return (mask + (mask >> 16)) & 0xff;
  836. }
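/* This is the usual parallel population count: adjacent bits are summed
   into 2-bit fields, then 4-bit fields, then bytes.  The last two steps
   add without masking the intermediate carries, which is safe because the
   true count is at most 32 and the final "& 0xff" extracts it.  */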
  837. /* Count the number of zeros in mask. */
  838. int
  839. mcore_num_zeros (HOST_WIDE_INT mask)
  840. {
  841. return 32 - mcore_num_ones (mask);
  842. }
  843. /* Determine byte being masked. */
  844. int
  845. mcore_byte_offset (unsigned int mask)
  846. {
  847. if (mask == 0x00ffffffL)
  848. return 0;
  849. else if (mask == 0xff00ffffL)
  850. return 1;
  851. else if (mask == 0xffff00ffL)
  852. return 2;
  853. else if (mask == 0xffffff00L)
  854. return 3;
  855. return -1;
  856. }
  857. /* Determine halfword being masked. */
  858. int
  859. mcore_halfword_offset (unsigned int mask)
  860. {
  861. if (mask == 0x0000ffffL)
  862. return 0;
  863. else if (mask == 0xffff0000L)
  864. return 1;
  865. return -1;
  866. }
  867. /* Output a series of bseti's corresponding to mask. */
  868. const char *
  869. mcore_output_bseti (rtx dst, int mask)
  870. {
  871. rtx out_operands[2];
  872. int bit;
  873. out_operands[0] = dst;
  874. for (bit = 0; bit < 32; bit++)
  875. {
  876. if ((mask & 0x1) == 0x1)
  877. {
  878. out_operands[1] = GEN_INT (bit);
  879. output_asm_insn ("bseti\t%0,%1", out_operands);
  880. }
  881. mask >>= 1;
  882. }
  883. return "";
  884. }
  885. /* Output a series of bclri's corresponding to mask. */
  886. const char *
  887. mcore_output_bclri (rtx dst, int mask)
  888. {
  889. rtx out_operands[2];
  890. int bit;
  891. out_operands[0] = dst;
  892. for (bit = 0; bit < 32; bit++)
  893. {
  894. if ((mask & 0x1) == 0x0)
  895. {
  896. out_operands[1] = GEN_INT (bit);
  897. output_asm_insn ("bclri\t%0,%1", out_operands);
  898. }
  899. mask >>= 1;
  900. }
  901. return "";
  902. }
  903. /* Output a conditional move of two constants that are +/- 1 within each
  904. other. See the "movtK" patterns in mcore.md. I'm not sure this is
  905. really worth the effort. */
  906. const char *
  907. mcore_output_cmov (rtx operands[], int cmp_t, const char * test)
  908. {
  909. HOST_WIDE_INT load_value;
  910. HOST_WIDE_INT adjust_value;
  911. rtx out_operands[4];
  912. out_operands[0] = operands[0];
  913. /* Check to see which constant is loadable. */
  914. if (const_ok_for_mcore (INTVAL (operands[1])))
  915. {
  916. out_operands[1] = operands[1];
  917. out_operands[2] = operands[2];
  918. }
  919. else if (const_ok_for_mcore (INTVAL (operands[2])))
  920. {
  921. out_operands[1] = operands[2];
  922. out_operands[2] = operands[1];
  923. /* Complement test since constants are swapped. */
  924. cmp_t = (cmp_t == 0);
  925. }
  926. load_value = INTVAL (out_operands[1]);
  927. adjust_value = INTVAL (out_operands[2]);
  928. /* First output the test if folded into the pattern. */
  929. if (test)
  930. output_asm_insn (test, operands);
  931. /* Load the constant - for now, only support constants that can be
  932. generated with a single instruction. maybe add general inlinable
  933. constants later (this will increase the # of patterns since the
  934. instruction sequence has a different length attribute). */
  935. if (load_value >= 0 && load_value <= 127)
  936. output_asm_insn ("movi\t%0,%1", out_operands);
  937. else if (CONST_OK_FOR_M (load_value))
  938. output_asm_insn ("bgeni\t%0,%P1", out_operands);
  939. else if (CONST_OK_FOR_N (load_value))
  940. output_asm_insn ("bmaski\t%0,%N1", out_operands);
  941. /* Output the constant adjustment. */
  942. if (load_value > adjust_value)
  943. {
  944. if (cmp_t)
  945. output_asm_insn ("decf\t%0", out_operands);
  946. else
  947. output_asm_insn ("dect\t%0", out_operands);
  948. }
  949. else
  950. {
  951. if (cmp_t)
  952. output_asm_insn ("incf\t%0", out_operands);
  953. else
  954. output_asm_insn ("inct\t%0", out_operands);
  955. }
  956. return "";
  957. }
  958. /* Outputs the peephole for moving a constant that gets not'ed followed
  959. by an and (i.e. combine the not and the and into andn). BRC */
  960. const char *
  961. mcore_output_andn (rtx insn ATTRIBUTE_UNUSED, rtx operands[])
  962. {
  963. HOST_WIDE_INT x, y;
  964. rtx out_operands[3];
  965. const char * load_op;
  966. char buf[256];
  967. int trick_no;
  968. trick_no = try_constant_tricks (INTVAL (operands[1]), &x, &y);
  969. gcc_assert (trick_no == 2);
  970. out_operands[0] = operands[0];
  971. out_operands[1] = GEN_INT (x);
  972. out_operands[2] = operands[2];
  973. if (x >= 0 && x <= 127)
  974. load_op = "movi\t%0,%1";
  975. /* Try exact power of two. */
  976. else if (CONST_OK_FOR_M (x))
  977. load_op = "bgeni\t%0,%P1";
  978. /* Try exact power of two - 1. */
  979. else if (CONST_OK_FOR_N (x))
  980. load_op = "bmaski\t%0,%N1";
  981. else
  982. {
  983. load_op = "BADMOVI-andn\t%0, %1";
  984. gcc_unreachable ();
  985. }
  986. sprintf (buf, "%s\n\tandn\t%%2,%%0", load_op);
  987. output_asm_insn (buf, out_operands);
  988. return "";
  989. }
  990. /* Output an inline constant. */
  991. static const char *
  992. output_inline_const (machine_mode mode, rtx operands[])
  993. {
  994. HOST_WIDE_INT x = 0, y = 0;
  995. int trick_no;
  996. rtx out_operands[3];
  997. char buf[256];
  998. char load_op[256];
  999. const char *dst_fmt;
  1000. HOST_WIDE_INT value;
  1001. value = INTVAL (operands[1]);
  1002. trick_no = try_constant_tricks (value, &x, &y);
  1003. /* lrw's are handled separately: Large inlinable constants never get
  1004. turned into lrw's. Our caller uses try_constant_tricks to back
  1005. off to an lrw rather than calling this routine. */
  1006. gcc_assert (trick_no != 0);
  1007. if (trick_no == 1)
  1008. x = value;
  1009. /* operands: 0 = dst, 1 = load immed., 2 = immed. adjustment. */
  1010. out_operands[0] = operands[0];
  1011. out_operands[1] = GEN_INT (x);
  1012. if (trick_no > 2)
  1013. out_operands[2] = GEN_INT (y);
  1014. /* Select dst format based on mode. */
  1015. if (mode == DImode && (! TARGET_LITTLE_END))
  1016. dst_fmt = "%R0";
  1017. else
  1018. dst_fmt = "%0";
  1019. if (x >= 0 && x <= 127)
  1020. sprintf (load_op, "movi\t%s,%%1", dst_fmt);
  1021. /* Try exact power of two. */
  1022. else if (CONST_OK_FOR_M (x))
  1023. sprintf (load_op, "bgeni\t%s,%%P1", dst_fmt);
  1024. /* Try exact power of two - 1. */
  1025. else if (CONST_OK_FOR_N (x))
  1026. sprintf (load_op, "bmaski\t%s,%%N1", dst_fmt);
  1027. else
  1028. {
  1029. sprintf (load_op, "BADMOVI-inline_const %s, %%1", dst_fmt);
  1030. gcc_unreachable ();
  1031. }
  1032. switch (trick_no)
  1033. {
  1034. case 1:
  1035. strcpy (buf, load_op);
  1036. break;
  1037. case 2: /* not */
  1038. sprintf (buf, "%s\n\tnot\t%s\t// %ld 0x%lx", load_op, dst_fmt, value, value);
  1039. break;
  1040. case 3: /* add */
  1041. sprintf (buf, "%s\n\taddi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
  1042. break;
  1043. case 4: /* sub */
  1044. sprintf (buf, "%s\n\tsubi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
  1045. break;
  1046. case 5: /* rsub */
  1047. /* Never happens unless -mrsubi, see try_constant_tricks(). */
  1048. sprintf (buf, "%s\n\trsubi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
  1049. break;
  1050. case 6: /* bseti */
  1051. sprintf (buf, "%s\n\tbseti\t%s,%%P2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
  1052. break;
  1053. case 7: /* bclr */
  1054. sprintf (buf, "%s\n\tbclri\t%s,%%Q2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
  1055. break;
  1056. case 8: /* rotl */
  1057. sprintf (buf, "%s\n\trotli\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
  1058. break;
  1059. case 9: /* lsl */
  1060. sprintf (buf, "%s\n\tlsli\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
  1061. break;
  1062. case 10: /* ixh */
  1063. sprintf (buf, "%s\n\tixh\t%s,%s\t// %ld 0x%lx", load_op, dst_fmt, dst_fmt, value, value);
  1064. break;
  1065. case 11: /* ixw */
  1066. sprintf (buf, "%s\n\tixw\t%s,%s\t// %ld 0x%lx", load_op, dst_fmt, dst_fmt, value, value);
  1067. break;
  1068. default:
  1069. return "";
  1070. }
  1071. output_asm_insn (buf, out_operands);
  1072. return "";
  1073. }
  1074. /* Output a move of a word-sized or smaller value. */
  1075. const char *
  1076. mcore_output_move (rtx insn ATTRIBUTE_UNUSED, rtx operands[],
  1077. machine_mode mode ATTRIBUTE_UNUSED)
  1078. {
  1079. rtx dst = operands[0];
  1080. rtx src = operands[1];
  1081. if (GET_CODE (dst) == REG)
  1082. {
  1083. if (GET_CODE (src) == REG)
  1084. {
  1085. if (REGNO (src) == CC_REG) /* r-c */
  1086. return "mvc\t%0";
  1087. else
  1088. return "mov\t%0,%1"; /* r-r*/
  1089. }
  1090. else if (GET_CODE (src) == MEM)
  1091. {
  1092. if (GET_CODE (XEXP (src, 0)) == LABEL_REF)
  1093. return "lrw\t%0,[%1]"; /* a-R */
  1094. else
  1095. switch (GET_MODE (src)) /* r-m */
  1096. {
  1097. case SImode:
  1098. return "ldw\t%0,%1";
  1099. case HImode:
  1100. return "ld.h\t%0,%1";
  1101. case QImode:
  1102. return "ld.b\t%0,%1";
  1103. default:
  1104. gcc_unreachable ();
  1105. }
  1106. }
  1107. else if (GET_CODE (src) == CONST_INT)
  1108. {
  1109. HOST_WIDE_INT x, y;
  1110. if (CONST_OK_FOR_I (INTVAL (src))) /* r-I */
  1111. return "movi\t%0,%1";
  1112. else if (CONST_OK_FOR_M (INTVAL (src))) /* r-M */
  1113. return "bgeni\t%0,%P1\t// %1 %x1";
  1114. else if (CONST_OK_FOR_N (INTVAL (src))) /* r-N */
  1115. return "bmaski\t%0,%N1\t// %1 %x1";
  1116. else if (try_constant_tricks (INTVAL (src), &x, &y)) /* R-P */
  1117. return output_inline_const (SImode, operands); /* 1-2 insns */
  1118. else
  1119. return "lrw\t%0,%x1\t// %1"; /* Get it from literal pool. */
  1120. }
  1121. else
  1122. return "lrw\t%0, %1"; /* Into the literal pool. */
  1123. }
  1124. else if (GET_CODE (dst) == MEM) /* m-r */
  1125. switch (GET_MODE (dst))
  1126. {
  1127. case SImode:
  1128. return "stw\t%1,%0";
  1129. case HImode:
  1130. return "st.h\t%1,%0";
  1131. case QImode:
  1132. return "st.b\t%1,%0";
  1133. default:
  1134. gcc_unreachable ();
  1135. }
  1136. gcc_unreachable ();
  1137. }
  1138. /* Return a sequence of instructions to perform DI or DF move.
  1139. Since the MCORE cannot move a DI or DF in one instruction, we have
  1140. to take care when we see overlapping source and dest registers. */
  1141. const char *
  1142. mcore_output_movedouble (rtx operands[], machine_mode mode ATTRIBUTE_UNUSED)
  1143. {
  1144. rtx dst = operands[0];
  1145. rtx src = operands[1];
  1146. if (GET_CODE (dst) == REG)
  1147. {
  1148. if (GET_CODE (src) == REG)
  1149. {
  1150. int dstreg = REGNO (dst);
  1151. int srcreg = REGNO (src);
  1152. /* Ensure the second word of the source is not overwritten. */
  1153. if (srcreg + 1 == dstreg)
  1154. return "mov %R0,%R1\n\tmov %0,%1";
  1155. else
  1156. return "mov %0,%1\n\tmov %R0,%R1";
  1157. }
  1158. else if (GET_CODE (src) == MEM)
  1159. {
  1160. rtx memexp = XEXP (src, 0);
  1161. int dstreg = REGNO (dst);
  1162. int basereg = -1;
  1163. if (GET_CODE (memexp) == LABEL_REF)
  1164. return "lrw\t%0,[%1]\n\tlrw\t%R0,[%R1]";
  1165. else if (GET_CODE (memexp) == REG)
  1166. basereg = REGNO (memexp);
  1167. else if (GET_CODE (memexp) == PLUS)
  1168. {
  1169. if (GET_CODE (XEXP (memexp, 0)) == REG)
  1170. basereg = REGNO (XEXP (memexp, 0));
  1171. else if (GET_CODE (XEXP (memexp, 1)) == REG)
  1172. basereg = REGNO (XEXP (memexp, 1));
  1173. else
  1174. gcc_unreachable ();
  1175. }
  1176. else
  1177. gcc_unreachable ();
  1178. /* ??? length attribute is wrong here. */
  1179. if (dstreg == basereg)
  1180. {
  1181. /* Just load them in reverse order. */
  1182. return "ldw\t%R0,%R1\n\tldw\t%0,%1";
  1183. /* XXX: alternative: move basereg to basereg+1
  1184. and then fall through. */
  1185. }
  1186. else
  1187. return "ldw\t%0,%1\n\tldw\t%R0,%R1";
  1188. }
  1189. else if (GET_CODE (src) == CONST_INT)
  1190. {
  1191. if (TARGET_LITTLE_END)
  1192. {
  1193. if (CONST_OK_FOR_I (INTVAL (src)))
  1194. output_asm_insn ("movi %0,%1", operands);
  1195. else if (CONST_OK_FOR_M (INTVAL (src)))
  1196. output_asm_insn ("bgeni %0,%P1", operands);
  1197. else if (CONST_OK_FOR_N (INTVAL (src)))
  1198. output_asm_insn ("bmaski %0,%N1", operands);
  1199. else
  1200. gcc_unreachable ();
  1201. if (INTVAL (src) < 0)
  1202. return "bmaski %R0,32";
  1203. else
  1204. return "movi %R0,0";
  1205. }
  1206. else
  1207. {
  1208. if (CONST_OK_FOR_I (INTVAL (src)))
  1209. output_asm_insn ("movi %R0,%1", operands);
  1210. else if (CONST_OK_FOR_M (INTVAL (src)))
  1211. output_asm_insn ("bgeni %R0,%P1", operands);
  1212. else if (CONST_OK_FOR_N (INTVAL (src)))
  1213. output_asm_insn ("bmaski %R0,%N1", operands);
  1214. else
  1215. gcc_unreachable ();
  1216. if (INTVAL (src) < 0)
  1217. return "bmaski %0,32";
  1218. else
  1219. return "movi %0,0";
  1220. }
  1221. }
  1222. else
  1223. gcc_unreachable ();
  1224. }
  1225. else if (GET_CODE (dst) == MEM && GET_CODE (src) == REG)
  1226. return "stw\t%1,%0\n\tstw\t%R1,%R0";
  1227. else
  1228. gcc_unreachable ();
  1229. }
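/* For illustration: copying the register pair r2/r3 into r3/r4 hits the
   overlap case above (srcreg + 1 == dstreg).  Emitting "mov r3,r2" first
   would clobber r3 before its value had been copied, so the second word
   is moved first: "mov r4,r3" and then "mov r3,r2".  */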
  1230. /* Predicates used by the templates. */
  1231. int
  1232. mcore_arith_S_operand (rtx op)
  1233. {
  1234. if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_M (~INTVAL (op)))
  1235. return 1;
  1236. return 0;
  1237. }
  1238. /* Expand insert bit field. BRC */
  1239. int
  1240. mcore_expand_insv (rtx operands[])
  1241. {
  1242. int width = INTVAL (operands[1]);
  1243. int posn = INTVAL (operands[2]);
  1244. int mask;
  1245. rtx mreg, sreg, ereg;
  1246. /* To get width 1 insv, the test in store_bit_field() (expmed.c, line 191)
  1247. for width==1 must be removed. Look around line 368. This is something
  1248. we really want the md part to do. */
  1249. if (width == 1 && GET_CODE (operands[3]) == CONST_INT)
  1250. {
  1251. /* Do directly with bseti or bclri. */
  1252. /* RBE: 2/97 consider only low bit of constant. */
  1253. if ((INTVAL (operands[3]) & 1) == 0)
  1254. {
  1255. mask = ~(1 << posn);
  1256. emit_insn (gen_rtx_SET (SImode, operands[0],
  1257. gen_rtx_AND (SImode, operands[0], GEN_INT (mask))));
  1258. }
  1259. else
  1260. {
  1261. mask = 1 << posn;
  1262. emit_insn (gen_rtx_SET (SImode, operands[0],
  1263. gen_rtx_IOR (SImode, operands[0], GEN_INT (mask))));
  1264. }
  1265. return 1;
  1266. }
  1267. /* Look at some bit-field placements that we aren't interested
  1268. in handling ourselves, unless specifically directed to do so. */
  1269. if (! TARGET_W_FIELD)
  1270. return 0; /* Generally, give up about now. */
  1271. if (width == 8 && posn % 8 == 0)
  1272. /* Byte sized and aligned; let caller break it up. */
  1273. return 0;
  1274. if (width == 16 && posn % 16 == 0)
  1275. /* Short sized and aligned; let caller break it up. */
  1276. return 0;
  1277. /* The general case - we can do this a little bit better than what the
  1278. machine independent part tries. This will get rid of all the subregs
  1279. that mess up constant folding in combine when working with relaxed
  1280. immediates. */
  1281. /* If setting the entire field, do it directly. */
  1282. if (GET_CODE (operands[3]) == CONST_INT
  1283. && INTVAL (operands[3]) == ((1 << width) - 1))
  1284. {
  1285. mreg = force_reg (SImode, GEN_INT (INTVAL (operands[3]) << posn));
  1286. emit_insn (gen_rtx_SET (SImode, operands[0],
  1287. gen_rtx_IOR (SImode, operands[0], mreg)));
  1288. return 1;
  1289. }
  1290. /* Generate the clear mask. */
  1291. mreg = force_reg (SImode, GEN_INT (~(((1 << width) - 1) << posn)));
  1292. /* Clear the field, to overlay it later with the source. */
  1293. emit_insn (gen_rtx_SET (SImode, operands[0],
  1294. gen_rtx_AND (SImode, operands[0], mreg)));
  1295. /* If the source is constant 0, we've nothing to add back. */
  1296. if (GET_CODE (operands[3]) == CONST_INT && INTVAL (operands[3]) == 0)
  1297. return 1;
  1298. /* XXX: Should we worry about more games with constant values?
  1299. We've covered the high profile: set/clear single-bit and many-bit
  1300. fields. How often do we see "arbitrary bit pattern" constants? */
  1301. sreg = copy_to_mode_reg (SImode, operands[3]);
  1302. /* Extract src as same width as dst (needed for signed values). We
  1303. always have to do this since we widen everything to SImode.
  1304. We don't have to mask if we're shifting this up against the
  1305. MSB of the register (e.g., the shift will push out any hi-order
  1306. bits). */
  1307. if (width + posn != (int) GET_MODE_BITSIZE (SImode))
  1308. {
  1309. ereg = force_reg (SImode, GEN_INT ((1 << width) - 1));
  1310. emit_insn (gen_rtx_SET (SImode, sreg,
  1311. gen_rtx_AND (SImode, sreg, ereg)));
  1312. }
  1313. /* Insert source value in dest. */
  1314. if (posn != 0)
  1315. emit_insn (gen_rtx_SET (SImode, sreg,
  1316. gen_rtx_ASHIFT (SImode, sreg, GEN_INT (posn))));
  1317. emit_insn (gen_rtx_SET (SImode, operands[0],
  1318. gen_rtx_IOR (SImode, operands[0], sreg)));
  1319. return 1;
  1320. }
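/* As an example of the general path: inserting a non-constant value into
   a 4-bit field at bit position 8 emits, in order, an AND of the
   destination with ~0xf00 (the clear mask), an AND of the widened source
   with 0xf, a left shift by 8, and a final IOR into the destination.  */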
  1321. /* ??? Block move stuff stolen from m88k. This code has not been
  1322. verified for correctness. */
  1323. /* Emit code to perform a block move. Choose the best method.
  1324. OPERANDS[0] is the destination.
  1325. OPERANDS[1] is the source.
  1326. OPERANDS[2] is the size.
  1327. OPERANDS[3] is the alignment safe to use. */
  1328. /* Emit code to perform a block move with an offset sequence of ldw/st
  1329. instructions (..., ldw 0, stw 1, ldw 1, stw 0, ...). SIZE and ALIGN are
  1330. known constants. DEST and SRC are registers. OFFSET is the known
  1331. starting point for the output pattern. */
  1332. static const machine_mode mode_from_align[] =
  1333. {
  1334. VOIDmode, QImode, HImode, VOIDmode, SImode,
  1335. };
  1336. static void
  1337. block_move_sequence (rtx dst_mem, rtx src_mem, int size, int align)
  1338. {
  1339. rtx temp[2];
  1340. machine_mode mode[2];
  1341. int amount[2];
  1342. bool active[2];
  1343. int phase = 0;
  1344. int next;
  1345. int offset_ld = 0;
  1346. int offset_st = 0;
  1347. rtx x;
  1348. x = XEXP (dst_mem, 0);
  1349. if (!REG_P (x))
  1350. {
  1351. x = force_reg (Pmode, x);
  1352. dst_mem = replace_equiv_address (dst_mem, x);
  1353. }
  1354. x = XEXP (src_mem, 0);
  1355. if (!REG_P (x))
  1356. {
  1357. x = force_reg (Pmode, x);
  1358. src_mem = replace_equiv_address (src_mem, x);
  1359. }
  1360. active[0] = active[1] = false;
  1361. do
  1362. {
  1363. next = phase;
  1364. phase ^= 1;
  1365. if (size > 0)
  1366. {
  1367. int next_amount;
  1368. next_amount = (size >= 4 ? 4 : (size >= 2 ? 2 : 1));
  1369. next_amount = MIN (next_amount, align);
  1370. amount[next] = next_amount;
  1371. mode[next] = mode_from_align[next_amount];
  1372. temp[next] = gen_reg_rtx (mode[next]);
  1373. x = adjust_address (src_mem, mode[next], offset_ld);
  1374. emit_insn (gen_rtx_SET (VOIDmode, temp[next], x));
  1375. offset_ld += next_amount;
  1376. size -= next_amount;
  1377. active[next] = true;
  1378. }
  1379. if (active[phase])
  1380. {
  1381. active[phase] = false;
  1382. x = adjust_address (dst_mem, mode[phase], offset_st);
  1383. emit_insn (gen_rtx_SET (VOIDmode, x, temp[phase]));
  1384. offset_st += amount[phase];
  1385. }
  1386. }
  1387. while (active[next]);
  1388. }
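/* For example, an 8 byte copy with word alignment expands to

       load  word 0 of src
       load  word 1 of src
       store word 0 to dst
       store word 1 to dst

   i.e. each store is issued one iteration after its load; that is what the
   two element temp[]/phase machinery above implements.  */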
  1389. bool
  1390. mcore_expand_block_move (rtx *operands)
  1391. {
  1392. HOST_WIDE_INT align, bytes, max;
  1393. if (GET_CODE (operands[2]) != CONST_INT)
  1394. return false;
  1395. bytes = INTVAL (operands[2]);
  1396. align = INTVAL (operands[3]);
  1397. if (bytes <= 0)
  1398. return false;
  1399. if (align > 4)
  1400. align = 4;
  1401. switch (align)
  1402. {
  1403. case 4:
  1404. if (bytes & 1)
  1405. max = 4*4;
  1406. else if (bytes & 3)
  1407. max = 8*4;
  1408. else
  1409. max = 16*4;
  1410. break;
  1411. case 2:
  1412. max = 4*2;
  1413. break;
  1414. case 1:
  1415. max = 4*1;
  1416. break;
  1417. default:
  1418. gcc_unreachable ();
  1419. }
  1420. if (bytes <= max)
  1421. {
  1422. block_move_sequence (operands[0], operands[1], bytes, align);
  1423. return true;
  1424. }
  1425. return false;
  1426. }
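/* For example, a 20 byte copy with 4 byte alignment gives max == 64 and is
   expanded inline as five word sized load/store pairs, whereas a 100 byte
   copy with only byte alignment gives max == 4, so we return false and the
   caller falls back (typically to a library call).  */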
  1427. /* Code to generate prologue and epilogue sequences. */
  1428. static int number_of_regs_before_varargs;
  1429. /* Set by TARGET_SETUP_INCOMING_VARARGS to indicate to prolog that this is
  1430. for a varargs function. */
  1431. static int current_function_anonymous_args;
  1432. #define STACK_BYTES (STACK_BOUNDARY/BITS_PER_UNIT)
  1433. #define STORE_REACH (64) /* Maximum displace of word store + 4. */
  1434. #define ADDI_REACH (32) /* Maximum addi operand. */
  1435. static void
  1436. layout_mcore_frame (struct mcore_frame * infp)
  1437. {
  1438. int n;
  1439. unsigned int i;
  1440. int nbytes;
  1441. int regarg;
  1442. int localregarg;
  1443. int outbounds;
  1444. unsigned int growths;
  1445. int step;
  1446. /* Might have to spill bytes to re-assemble a big argument that
  1447. was passed partially in registers and partially on the stack. */
  1448. nbytes = crtl->args.pretend_args_size;
  1449. /* Determine how much space for spilled anonymous args (e.g., stdarg). */
  1450. if (current_function_anonymous_args)
  1451. nbytes += (NPARM_REGS - number_of_regs_before_varargs) * UNITS_PER_WORD;
  1452. infp->arg_size = nbytes;
  1453. /* How much space to save non-volatile registers we stomp. */
  1454. infp->reg_mask = calc_live_regs (& n);
  1455. infp->reg_size = n * 4;
  1456. /* And the rest of it... locals and space for overflowed outbounds. */
  1457. infp->local_size = get_frame_size ();
  1458. infp->outbound_size = crtl->outgoing_args_size;
  1459. /* Make sure we have a whole number of words for the locals. */
  1460. if (infp->local_size % STACK_BYTES)
  1461. infp->local_size = (infp->local_size + STACK_BYTES - 1) & ~ (STACK_BYTES -1);
  1462. /* Only thing we know we have to pad is the outbound space, since
     we've aligned our locals assuming that the base of locals is aligned.  */
  1464. infp->pad_local = 0;
  1465. infp->pad_reg = 0;
  1466. infp->pad_outbound = 0;
  1467. if (infp->outbound_size % STACK_BYTES)
  1468. infp->pad_outbound = STACK_BYTES - (infp->outbound_size % STACK_BYTES);
  1469. /* Now we see how we want to stage the prologue so that it does
  1470. the most appropriate stack growth and register saves to either:
  1471. (1) run fast,
  1472. (2) reduce instruction space, or
  1473. (3) reduce stack space. */
  1474. for (i = 0; i < ARRAY_SIZE (infp->growth); i++)
  1475. infp->growth[i] = 0;
  1476. regarg = infp->reg_size + infp->arg_size;
  1477. localregarg = infp->local_size + regarg;
  1478. outbounds = infp->outbound_size + infp->pad_outbound;
  1479. growths = 0;
  1480. /* XXX: Consider one where we consider localregarg + outbound too! */
  /* If the frame is <= 32 bytes and an stm would save <= 2 registers,
     use stw's with offsets and buy the frame in one shot.  */
  1483. if (localregarg <= ADDI_REACH
  1484. && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
  1485. {
  1486. /* Make sure we'll be aligned. */
  1487. if (localregarg % STACK_BYTES)
  1488. infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);
  1489. step = localregarg + infp->pad_reg;
  1490. infp->reg_offset = infp->local_size;
  1491. if (outbounds + step <= ADDI_REACH && !frame_pointer_needed)
  1492. {
  1493. step += outbounds;
  1494. infp->reg_offset += outbounds;
  1495. outbounds = 0;
  1496. }
  1497. infp->arg_offset = step - 4;
  1498. infp->growth[growths++] = step;
  1499. infp->reg_growth = growths;
  1500. infp->local_growth = growths;
  1501. /* If we haven't already folded it in. */
  1502. if (outbounds)
  1503. infp->growth[growths++] = outbounds;
  1504. goto finish;
  1505. }
  1506. /* Frame can't be done with a single subi, but can be done with 2
  1507. insns. If the 'stm' is getting <= 2 registers, we use stw's and
  1508. shift some of the stack purchase into the first subi, so both are
  1509. single instructions. */
  1510. if (localregarg <= STORE_REACH
  1511. && (infp->local_size > ADDI_REACH)
  1512. && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
  1513. {
  1514. int all;
  1515. /* Make sure we'll be aligned; use either pad_reg or pad_local. */
  1516. if (localregarg % STACK_BYTES)
  1517. infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);
  1518. all = localregarg + infp->pad_reg + infp->pad_local;
  1519. step = ADDI_REACH; /* As much up front as we can. */
  1520. if (step > all)
  1521. step = all;
  1522. /* XXX: Consider whether step will still be aligned; we believe so. */
  1523. infp->arg_offset = step - 4;
  1524. infp->growth[growths++] = step;
  1525. infp->reg_growth = growths;
  1526. infp->reg_offset = step - infp->pad_reg - infp->reg_size;
  1527. all -= step;
  1528. /* Can we fold in any space required for outbounds? */
  1529. if (outbounds + all <= ADDI_REACH && !frame_pointer_needed)
  1530. {
  1531. all += outbounds;
  1532. outbounds = 0;
  1533. }
  1534. /* Get the rest of the locals in place. */
  1535. step = all;
  1536. infp->growth[growths++] = step;
  1537. infp->local_growth = growths;
  1538. all -= step;
  1539. gcc_assert (all == 0);
  1540. /* Finish off if we need to do so. */
  1541. if (outbounds)
  1542. infp->growth[growths++] = outbounds;
  1543. goto finish;
  1544. }
  1545. /* Registers + args is nicely aligned, so we'll buy that in one shot.
  1546. Then we buy the rest of the frame in 1 or 2 steps depending on
  1547. whether we need a frame pointer. */
  1548. if ((regarg % STACK_BYTES) == 0)
  1549. {
  1550. infp->growth[growths++] = regarg;
  1551. infp->reg_growth = growths;
  1552. infp->arg_offset = regarg - 4;
  1553. infp->reg_offset = 0;
  1554. if (infp->local_size % STACK_BYTES)
  1555. infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);
  1556. step = infp->local_size + infp->pad_local;
  1557. if (!frame_pointer_needed)
  1558. {
  1559. step += outbounds;
  1560. outbounds = 0;
  1561. }
  1562. infp->growth[growths++] = step;
  1563. infp->local_growth = growths;
  1564. /* If there's any left to be done. */
  1565. if (outbounds)
  1566. infp->growth[growths++] = outbounds;
  1567. goto finish;
  1568. }
  1569. /* XXX: optimizations that we'll want to play with....
  1570. -- regarg is not aligned, but it's a small number of registers;
  1571. use some of localsize so that regarg is aligned and then
  1572. save the registers. */
  1573. /* Simple encoding; plods down the stack buying the pieces as it goes.
  1574. -- does not optimize space consumption.
  1575. -- does not attempt to optimize instruction counts.
  1576. -- but it is safe for all alignments. */
  1577. if (regarg % STACK_BYTES != 0)
  1578. infp->pad_reg = STACK_BYTES - (regarg % STACK_BYTES);
  1579. infp->growth[growths++] = infp->arg_size + infp->reg_size + infp->pad_reg;
  1580. infp->reg_growth = growths;
  1581. infp->arg_offset = infp->growth[0] - 4;
  1582. infp->reg_offset = 0;
  1583. if (frame_pointer_needed)
  1584. {
  1585. if (infp->local_size % STACK_BYTES != 0)
  1586. infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);
  1587. infp->growth[growths++] = infp->local_size + infp->pad_local;
  1588. infp->local_growth = growths;
  1589. infp->growth[growths++] = outbounds;
  1590. }
  1591. else
  1592. {
  1593. if ((infp->local_size + outbounds) % STACK_BYTES != 0)
  1594. infp->pad_local = STACK_BYTES - ((infp->local_size + outbounds) % STACK_BYTES);
  1595. infp->growth[growths++] = infp->local_size + infp->pad_local + outbounds;
  1596. infp->local_growth = growths;
  1597. }
  /* Anything else that we've forgotten?  Plus a few consistency checks.  */
  1599. finish:
  1600. gcc_assert (infp->reg_offset >= 0);
  1601. gcc_assert (growths <= MAX_STACK_GROWS);
  1602. for (i = 0; i < growths; i++)
  1603. gcc_assert (!(infp->growth[i] % STACK_BYTES));
  1604. }
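/* An illustrative layout (made-up numbers): with arg_size == 0,
   reg_size == 8 (say r8 and r15 are live), local_size == 16 and no
   outgoing args, the first strategy above applies: localregarg == 24 is
   within ADDI_REACH, so growth[0] == 24, reg_growth == local_growth == 1
   and reg_offset == 16.  The whole frame is bought with a single 24 byte
   adjustment, the two registers are saved at sp+16 and sp+20, and the
   locals occupy sp+0 .. sp+15.  */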
  1605. /* Define the offset between two registers, one to be eliminated, and
  1606. the other its replacement, at the start of a routine. */
  1607. int
  1608. mcore_initial_elimination_offset (int from, int to)
  1609. {
  1610. int above_frame;
  1611. int below_frame;
  1612. struct mcore_frame fi;
  1613. layout_mcore_frame (& fi);
  1614. /* fp to ap */
  1615. above_frame = fi.local_size + fi.pad_local + fi.reg_size + fi.pad_reg;
  1616. /* sp to fp */
  1617. below_frame = fi.outbound_size + fi.pad_outbound;
  1618. if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
  1619. return above_frame;
  1620. if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
  1621. return above_frame + below_frame;
  1622. if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
  1623. return below_frame;
  1624. gcc_unreachable ();
  1625. }
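/* Pictorially (stack grows downward; sizes are the padded values computed
   by layout_mcore_frame):

       incoming args / arg spill      (arg_size)
     ------------------------------   <- arg pointer
       saved registers                (reg_size + pad_reg)
       locals                         (local_size + pad_local)
     ------------------------------   <- frame pointer
       outgoing args                  (outbound_size + pad_outbound)
     ------------------------------   <- stack pointer

   so above_frame is the ap->fp distance and below_frame the fp->sp one.  */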
  1626. /* Keep track of some information about varargs for the prolog. */
  1627. static void
  1628. mcore_setup_incoming_varargs (cumulative_args_t args_so_far_v,
  1629. machine_mode mode, tree type,
  1630. int * ptr_pretend_size ATTRIBUTE_UNUSED,
  1631. int second_time ATTRIBUTE_UNUSED)
  1632. {
  1633. CUMULATIVE_ARGS *args_so_far = get_cumulative_args (args_so_far_v);
  1634. current_function_anonymous_args = 1;
  1635. /* We need to know how many argument registers are used before
  1636. the varargs start, so that we can push the remaining argument
  1637. registers during the prologue. */
  1638. number_of_regs_before_varargs = *args_so_far + mcore_num_arg_regs (mode, type);
  1639. /* There is a bug somewhere in the arg handling code.
  1640. Until I can find it this workaround always pushes the
  1641. last named argument onto the stack. */
  1642. number_of_regs_before_varargs = *args_so_far;
  1643. /* The last named argument may be split between argument registers
  1644. and the stack. Allow for this here. */
  1645. if (number_of_regs_before_varargs > NPARM_REGS)
  1646. number_of_regs_before_varargs = NPARM_REGS;
  1647. }
  1648. void
  1649. mcore_expand_prolog (void)
  1650. {
  1651. struct mcore_frame fi;
  1652. int space_allocated = 0;
  1653. int growth = 0;
  1654. /* Find out what we're doing. */
  1655. layout_mcore_frame (&fi);
  1656. space_allocated = fi.arg_size + fi.reg_size + fi.local_size +
  1657. fi.outbound_size + fi.pad_outbound + fi.pad_local + fi.pad_reg;
  1658. if (TARGET_CG_DATA)
  1659. {
  1660. /* Emit a symbol for this routine's frame size. */
  1661. rtx x;
  1662. x = DECL_RTL (current_function_decl);
  1663. gcc_assert (GET_CODE (x) == MEM);
  1664. x = XEXP (x, 0);
  1665. gcc_assert (GET_CODE (x) == SYMBOL_REF);
  1666. free (mcore_current_function_name);
  1667. mcore_current_function_name = xstrdup (XSTR (x, 0));
  1668. ASM_OUTPUT_CG_NODE (asm_out_file, mcore_current_function_name, space_allocated);
  1669. if (cfun->calls_alloca)
  1670. ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name, "alloca", 1);
  1671. /* 970425: RBE:
  1672. We're looking at how the 8byte alignment affects stack layout
  1673. and where we had to pad things. This emits information we can
  1674. extract which tells us about frame sizes and the like. */
  1675. fprintf (asm_out_file,
  1676. "\t.equ\t__$frame$info$_%s_$_%d_%d_x%x_%d_%d_%d,0\n",
  1677. mcore_current_function_name,
  1678. fi.arg_size, fi.reg_size, fi.reg_mask,
  1679. fi.local_size, fi.outbound_size,
  1680. frame_pointer_needed);
  1681. }
  1682. if (mcore_naked_function_p ())
  1683. return;
  1684. /* Handle stdarg+regsaves in one shot: can't be more than 64 bytes. */
  1685. output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */
  1686. /* If we have a parameter passed partially in regs and partially in memory,
  1687. the registers will have been stored to memory already in function.c. So
  1688. we only need to do something here for varargs functions. */
  1689. if (fi.arg_size != 0 && crtl->args.pretend_args_size == 0)
  1690. {
  1691. int offset;
  1692. int rn = FIRST_PARM_REG + NPARM_REGS - 1;
  1693. int remaining = fi.arg_size;
  1694. for (offset = fi.arg_offset; remaining >= 4; offset -= 4, rn--, remaining -= 4)
  1695. {
  1696. emit_insn (gen_movsi
  1697. (gen_rtx_MEM (SImode,
  1698. plus_constant (Pmode, stack_pointer_rtx,
  1699. offset)),
  1700. gen_rtx_REG (SImode, rn)));
  1701. }
  1702. }
  1703. /* Do we need another stack adjustment before we do the register saves? */
  1704. if (growth < fi.reg_growth)
  1705. output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */
  1706. if (fi.reg_size != 0)
  1707. {
  1708. int i;
  1709. int offs = fi.reg_offset;
  1710. for (i = 15; i >= 0; i--)
  1711. {
  1712. if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
  1713. {
  1714. int first_reg = 15;
  1715. while (fi.reg_mask & (1 << first_reg))
  1716. first_reg--;
  1717. first_reg++;
  1718. emit_insn (gen_store_multiple (gen_rtx_MEM (SImode, stack_pointer_rtx),
  1719. gen_rtx_REG (SImode, first_reg),
  1720. GEN_INT (16 - first_reg)));
  1721. i -= (15 - first_reg);
  1722. offs += (16 - first_reg) * 4;
  1723. }
  1724. else if (fi.reg_mask & (1 << i))
  1725. {
  1726. emit_insn (gen_movsi
  1727. (gen_rtx_MEM (SImode,
  1728. plus_constant (Pmode, stack_pointer_rtx,
  1729. offs)),
  1730. gen_rtx_REG (SImode, i)));
  1731. offs += 4;
  1732. }
  1733. }
  1734. }
  1735. /* Figure the locals + outbounds. */
  1736. if (frame_pointer_needed)
  1737. {
  1738. /* If we haven't already purchased to 'fp'. */
  1739. if (growth < fi.local_growth)
  1740. output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */
  1741. emit_insn (gen_movsi (frame_pointer_rtx, stack_pointer_rtx));
  1742. /* ... and then go any remaining distance for outbounds, etc. */
  1743. if (fi.growth[growth])
  1744. output_stack_adjust (-1, fi.growth[growth++]);
  1745. }
  1746. else
  1747. {
  1748. if (growth < fi.local_growth)
  1749. output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */
  1750. if (fi.growth[growth])
  1751. output_stack_adjust (-1, fi.growth[growth++]);
  1752. }
  1753. }
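/* Schematically, for the illustrative frame described above (one 24 byte
   growth, r8 and r15 saved at offsets 16 and 20, no frame pointer), the
   prologue emits something like

       subi  sp, 24
       stw   r15, (sp, 16)
       stw   r8, (sp, 20)

   The exact assembly comes from the stack adjust and movsi patterns in
   mcore.md; the operand syntax here is only a sketch.  */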
  1754. void
  1755. mcore_expand_epilog (void)
  1756. {
  1757. struct mcore_frame fi;
  1758. int i;
  1759. int offs;
  int growth = MAX_STACK_GROWS - 1;
  /* Find out what we're doing.  */
  layout_mcore_frame (&fi);
  1763. if (mcore_naked_function_p ())
  1764. return;
  1765. /* If we had a frame pointer, restore the sp from that. */
  1766. if (frame_pointer_needed)
  1767. {
  1768. emit_insn (gen_movsi (stack_pointer_rtx, frame_pointer_rtx));
  1769. growth = fi.local_growth - 1;
  1770. }
  1771. else
  1772. {
  1773. /* XXX: while loop should accumulate and do a single sell. */
  1774. while (growth >= fi.local_growth)
  1775. {
  1776. if (fi.growth[growth] != 0)
  1777. output_stack_adjust (1, fi.growth[growth]);
  1778. growth--;
  1779. }
  1780. }
  1781. /* Make sure we've shrunk stack back to the point where the registers
  1782. were laid down. This is typically 0/1 iterations. Then pull the
  1783. register save information back off the stack. */
  1784. while (growth >= fi.reg_growth)
  1785. output_stack_adjust ( 1, fi.growth[growth--]);
  1786. offs = fi.reg_offset;
  1787. for (i = 15; i >= 0; i--)
  1788. {
  1789. if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
  1790. {
  1791. int first_reg;
  1792. /* Find the starting register. */
  1793. first_reg = 15;
  1794. while (fi.reg_mask & (1 << first_reg))
  1795. first_reg--;
  1796. first_reg++;
  1797. emit_insn (gen_load_multiple (gen_rtx_REG (SImode, first_reg),
  1798. gen_rtx_MEM (SImode, stack_pointer_rtx),
  1799. GEN_INT (16 - first_reg)));
  1800. i -= (15 - first_reg);
  1801. offs += (16 - first_reg) * 4;
  1802. }
  1803. else if (fi.reg_mask & (1 << i))
  1804. {
  1805. emit_insn (gen_movsi
  1806. (gen_rtx_REG (SImode, i),
  1807. gen_rtx_MEM (SImode,
  1808. plus_constant (Pmode, stack_pointer_rtx,
  1809. offs))));
  1810. offs += 4;
  1811. }
  1812. }
  1813. /* Give back anything else. */
  1814. /* XXX: Should accumulate total and then give it back. */
  1815. while (growth >= 0)
  1816. output_stack_adjust ( 1, fi.growth[growth--]);
  1817. }
  1818. /* This code is borrowed from the SH port. */
  1819. /* The MCORE cannot load a large constant into a register, constants have to
  1820. come from a pc relative load. The reference of a pc relative load
  1821. instruction must be less than 1k in front of the instruction. This
  1822. means that we often have to dump a constant inside a function, and
  1823. generate code to branch around it.
  1824. It is important to minimize this, since the branches will slow things
  1825. down and make things bigger.
  1826. Worst case code looks like:
  1827. lrw L1,r0
  1828. br L2
  1829. align
  1830. L1: .long value
  1831. L2:
  1832. ..
  1833. lrw L3,r0
  1834. br L4
  1835. align
  1836. L3: .long value
  1837. L4:
  1838. ..
  1839. We fix this by performing a scan before scheduling, which notices which
  1840. instructions need to have their operands fetched from the constant table
  1841. and builds the table.
  1842. The algorithm is:
  1843. scan, find an instruction which needs a pcrel move. Look forward, find the
  1844. last barrier which is within MAX_COUNT bytes of the requirement.
  1845. If there isn't one, make one. Process all the instructions between
  1846. the find and the barrier.
  1847. In the above example, we can tell that L3 is within 1k of L1, so
  1848. the first move can be shrunk from the 2 insn+constant sequence into
  1849. just 1 insn, and the constant moved to L3 to make:
  1850. lrw L1,r0
  1851. ..
  1852. lrw L3,r0
br L4
  1854. align
  1855. L3:.long value
  1856. L4:.long value
  1857. Then the second move becomes the target for the shortening process. */
  1858. typedef struct
  1859. {
  1860. rtx value; /* Value in table. */
  1861. rtx label; /* Label of value. */
  1862. } pool_node;
  1863. /* The maximum number of constants that can fit into one pool, since
  1864. the pc relative range is 0...1020 bytes and constants are at least 4
  1865. bytes long. We subtract 4 from the range to allow for the case where
  1866. we need to add a branch/align before the constant pool. */
  1867. #define MAX_COUNT 1016
  1868. #define MAX_POOL_SIZE (MAX_COUNT/4)
  1869. static pool_node pool_vector[MAX_POOL_SIZE];
  1870. static int pool_size;
  1871. /* Dump out any constants accumulated in the final pass. These
  1872. will only be labels. */
  1873. const char *
  1874. mcore_output_jump_label_table (void)
  1875. {
  1876. int i;
  1877. if (pool_size)
  1878. {
  1879. fprintf (asm_out_file, "\t.align 2\n");
  1880. for (i = 0; i < pool_size; i++)
  1881. {
  1882. pool_node * p = pool_vector + i;
  1883. (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (p->label));
  1884. output_asm_insn (".long %0", &p->value);
  1885. }
  1886. pool_size = 0;
  1887. }
  1888. return "";
  1889. }
  1890. /* Check whether insn is a candidate for a conditional. */
  1891. static cond_type
  1892. is_cond_candidate (rtx insn)
  1893. {
  1894. /* The only things we conditionalize are those that can be directly
  1895. changed into a conditional. Only bother with SImode items. If
  1896. we wanted to be a little more aggressive, we could also do other
  1897. modes such as DImode with reg-reg move or load 0. */
  1898. if (NONJUMP_INSN_P (insn))
  1899. {
  1900. rtx pat = PATTERN (insn);
  1901. rtx src, dst;
  1902. if (GET_CODE (pat) != SET)
  1903. return COND_NO;
  1904. dst = XEXP (pat, 0);
  1905. if ((GET_CODE (dst) != REG &&
  1906. GET_CODE (dst) != SUBREG) ||
  1907. GET_MODE (dst) != SImode)
  1908. return COND_NO;
  1909. src = XEXP (pat, 1);
  1910. if ((GET_CODE (src) == REG ||
  1911. (GET_CODE (src) == SUBREG &&
  1912. GET_CODE (SUBREG_REG (src)) == REG)) &&
  1913. GET_MODE (src) == SImode)
  1914. return COND_MOV_INSN;
  1915. else if (GET_CODE (src) == CONST_INT &&
  1916. INTVAL (src) == 0)
  1917. return COND_CLR_INSN;
  1918. else if (GET_CODE (src) == PLUS &&
  1919. (GET_CODE (XEXP (src, 0)) == REG ||
  1920. (GET_CODE (XEXP (src, 0)) == SUBREG &&
  1921. GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
  1922. GET_MODE (XEXP (src, 0)) == SImode &&
  1923. GET_CODE (XEXP (src, 1)) == CONST_INT &&
  1924. INTVAL (XEXP (src, 1)) == 1)
  1925. return COND_INC_INSN;
  1926. else if (((GET_CODE (src) == MINUS &&
  1927. GET_CODE (XEXP (src, 1)) == CONST_INT &&
INTVAL (XEXP (src, 1)) == 1) ||
  1929. (GET_CODE (src) == PLUS &&
  1930. GET_CODE (XEXP (src, 1)) == CONST_INT &&
  1931. INTVAL (XEXP (src, 1)) == -1)) &&
  1932. (GET_CODE (XEXP (src, 0)) == REG ||
  1933. (GET_CODE (XEXP (src, 0)) == SUBREG &&
  1934. GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
  1935. GET_MODE (XEXP (src, 0)) == SImode)
  1936. return COND_DEC_INSN;
  1937. /* Some insns that we don't bother with:
  1938. (set (rx:DI) (ry:DI))
  1939. (set (rx:DI) (const_int 0))
  1940. */
  1941. }
  1942. else if (JUMP_P (insn)
  1943. && GET_CODE (PATTERN (insn)) == SET
  1944. && GET_CODE (XEXP (PATTERN (insn), 1)) == LABEL_REF)
  1945. return COND_BRANCH_INSN;
  1946. return COND_NO;
  1947. }
  1948. /* Emit a conditional version of insn and replace the old insn with the
  1949. new one. Return the new insn if emitted. */
  1950. static rtx_insn *
  1951. emit_new_cond_insn (rtx insn, int cond)
  1952. {
  1953. rtx c_insn = 0;
  1954. rtx pat, dst, src;
  1955. cond_type num;
  1956. if ((num = is_cond_candidate (insn)) == COND_NO)
  1957. return NULL;
  1958. pat = PATTERN (insn);
  1959. if (NONJUMP_INSN_P (insn))
  1960. {
  1961. dst = SET_DEST (pat);
  1962. src = SET_SRC (pat);
  1963. }
  1964. else
  1965. {
  1966. dst = JUMP_LABEL (insn);
  1967. src = NULL_RTX;
  1968. }
  1969. switch (num)
  1970. {
  1971. case COND_MOV_INSN:
  1972. case COND_CLR_INSN:
  1973. if (cond)
  1974. c_insn = gen_movt0 (dst, src, dst);
  1975. else
  1976. c_insn = gen_movt0 (dst, dst, src);
  1977. break;
  1978. case COND_INC_INSN:
  1979. if (cond)
  1980. c_insn = gen_incscc (dst, dst);
  1981. else
  1982. c_insn = gen_incscc_false (dst, dst);
  1983. break;
  1984. case COND_DEC_INSN:
  1985. if (cond)
  1986. c_insn = gen_decscc (dst, dst);
  1987. else
  1988. c_insn = gen_decscc_false (dst, dst);
  1989. break;
  1990. case COND_BRANCH_INSN:
  1991. if (cond)
  1992. c_insn = gen_branch_true (dst);
  1993. else
  1994. c_insn = gen_branch_false (dst);
  1995. break;
  1996. default:
  1997. return NULL;
  1998. }
  1999. /* Only copy the notes if they exist. */
  2000. if (rtx_length [GET_CODE (c_insn)] >= 7 && rtx_length [GET_CODE (insn)] >= 7)
  2001. {
  2002. /* We really don't need to bother with the notes and links at this
  2003. point, but go ahead and save the notes. This will help is_dead()
  2004. when applying peepholes (links don't matter since they are not
  2005. used any more beyond this point for the mcore). */
  2006. REG_NOTES (c_insn) = REG_NOTES (insn);
  2007. }
  2008. if (num == COND_BRANCH_INSN)
  2009. {
  2010. /* For jumps, we need to be a little bit careful and emit the new jump
  2011. before the old one and to update the use count for the target label.
  2012. This way, the barrier following the old (uncond) jump will get
  2013. deleted, but the label won't. */
  2014. c_insn = emit_jump_insn_before (c_insn, insn);
  2015. ++ LABEL_NUSES (dst);
  2016. JUMP_LABEL (c_insn) = dst;
  2017. }
  2018. else
  2019. c_insn = emit_insn_after (c_insn, insn);
  2020. delete_insn (insn);
  2021. return as_a <rtx_insn *> (c_insn);
  2022. }
  2023. /* Attempt to change a basic block into a series of conditional insns. This
  2024. works by taking the branch at the end of the 1st block and scanning for the
  2025. end of the 2nd block. If all instructions in the 2nd block have cond.
  2026. versions and the label at the start of block 3 is the same as the target
   from the branch at block 1, then conditionalize all insns in block 2 using
  2028. the inverse condition of the branch at block 1. (Note I'm bending the
  2029. definition of basic block here.)
  2030. e.g., change:
  2031. bt L2 <-- end of block 1 (delete)
  2032. mov r7,r8
  2033. addu r7,1
  2034. br L3 <-- end of block 2
  2035. L2: ... <-- start of block 3 (NUSES==1)
  2036. L3: ...
  2037. to:
  2038. movf r7,r8
  2039. incf r7
  2040. bf L3
  2041. L3: ...
  2042. we can delete the L2 label if NUSES==1 and re-apply the optimization
  2043. starting at the last instruction of block 2. This may allow an entire
  2044. if-then-else statement to be conditionalized. BRC */
  2045. static rtx_insn *
  2046. conditionalize_block (rtx_insn *first)
  2047. {
  2048. rtx_insn *insn;
  2049. rtx br_pat;
  2050. rtx_insn *end_blk_1_br = 0;
  2051. rtx_insn *end_blk_2_insn = 0;
  2052. rtx_insn *start_blk_3_lab = 0;
  2053. int cond;
  2054. int br_lab_num;
  2055. int blk_size = 0;
  2056. /* Check that the first insn is a candidate conditional jump. This is
  2057. the one that we'll eliminate. If not, advance to the next insn to
  2058. try. */
  2059. if (! JUMP_P (first)
  2060. || GET_CODE (PATTERN (first)) != SET
  2061. || GET_CODE (XEXP (PATTERN (first), 1)) != IF_THEN_ELSE)
  2062. return NEXT_INSN (first);
  2063. /* Extract some information we need. */
  2064. end_blk_1_br = first;
  2065. br_pat = PATTERN (end_blk_1_br);
  2066. /* Complement the condition since we use the reverse cond. for the insns. */
  2067. cond = (GET_CODE (XEXP (XEXP (br_pat, 1), 0)) == EQ);
  2068. /* Determine what kind of branch we have. */
  2069. if (GET_CODE (XEXP (XEXP (br_pat, 1), 1)) == LABEL_REF)
  2070. {
  2071. /* A normal branch, so extract label out of first arm. */
  2072. br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 1), 0));
  2073. }
  2074. else
  2075. {
  2076. /* An inverse branch, so extract the label out of the 2nd arm
  2077. and complement the condition. */
  2078. cond = (cond == 0);
  2079. br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 2), 0));
  2080. }
  2081. /* Scan forward for the start of block 2: it must start with a
  2082. label and that label must be the same as the branch target
  2083. label from block 1. We don't care about whether block 2 actually
  2084. ends with a branch or a label (an uncond. branch is
  2085. conditionalizable). */
  2086. for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
  2087. {
  2088. enum rtx_code code;
  2089. code = GET_CODE (insn);
  2090. /* Look for the label at the start of block 3. */
  2091. if (code == CODE_LABEL && CODE_LABEL_NUMBER (insn) == br_lab_num)
  2092. break;
  2093. /* Skip barriers, notes, and conditionalizable insns. If the
  2094. insn is not conditionalizable or makes this optimization fail,
  2095. just return the next insn so we can start over from that point. */
  2096. if (code != BARRIER && code != NOTE && !is_cond_candidate (insn))
  2097. return NEXT_INSN (insn);
  2098. /* Remember the last real insn before the label (i.e. end of block 2). */
  2099. if (code == JUMP_INSN || code == INSN)
  2100. {
  2101. blk_size ++;
  2102. end_blk_2_insn = insn;
  2103. }
  2104. }
  2105. if (!insn)
  2106. return insn;
  2107. /* It is possible for this optimization to slow performance if the blocks
  2108. are long. This really depends upon whether the branch is likely taken
     or not.  If the branch is taken, we slow performance in many cases.  But
     if the branch is not taken, we always help performance for a single
     block; for a double block (i.e. when the optimization is re-applied)
     this is not necessarily true, since the 'right thing' depends on the
     overall length of the collapsed block.  As a compromise, don't apply
     this optimization on blocks larger than size 2 (unlikely for the mcore)
     when speed is important.  The best threshold depends on the latencies
     of the instructions (i.e., the branch penalty).  */
  2117. if (optimize > 1 && blk_size > 2)
  2118. return insn;
  2119. /* At this point, we've found the start of block 3 and we know that
  2120. it is the destination of the branch from block 1. Also, all
  2121. instructions in the block 2 are conditionalizable. So, apply the
  2122. conditionalization and delete the branch. */
  2123. start_blk_3_lab = insn;
  2124. for (insn = NEXT_INSN (end_blk_1_br); insn != start_blk_3_lab;
  2125. insn = NEXT_INSN (insn))
  2126. {
  2127. rtx_insn *newinsn;
  2128. if (insn->deleted ())
  2129. continue;
  2130. /* Try to form a conditional variant of the instruction and emit it. */
  2131. if ((newinsn = emit_new_cond_insn (insn, cond)))
  2132. {
  2133. if (end_blk_2_insn == insn)
  2134. end_blk_2_insn = newinsn;
  2135. insn = newinsn;
  2136. }
  2137. }
  2138. /* Note whether we will delete the label starting blk 3 when the jump
  2139. gets deleted. If so, we want to re-apply this optimization at the
  2140. last real instruction right before the label. */
  2141. if (LABEL_NUSES (start_blk_3_lab) == 1)
  2142. {
  2143. start_blk_3_lab = 0;
  2144. }
  2145. /* ??? we probably should redistribute the death notes for this insn, esp.
  2146. the death of cc, but it doesn't really matter this late in the game.
  2147. The peepholes all use is_dead() which will find the correct death
  2148. regardless of whether there is a note. */
  2149. delete_insn (end_blk_1_br);
  2150. if (! start_blk_3_lab)
  2151. return end_blk_2_insn;
  2152. /* Return the insn right after the label at the start of block 3. */
  2153. return NEXT_INSN (start_blk_3_lab);
  2154. }
  2155. /* Apply the conditionalization of blocks optimization. This is the
  2156. outer loop that traverses through the insns scanning for a branch
  2157. that signifies an opportunity to apply the optimization. Note that
  2158. this optimization is applied late. If we could apply it earlier,
  2159. say before cse 2, it may expose more optimization opportunities.
  2160. but, the pay back probably isn't really worth the effort (we'd have
  2161. to update all reg/flow/notes/links/etc to make it work - and stick it
  2162. in before cse 2). */
  2163. static void
  2164. conditionalize_optimization (void)
  2165. {
  2166. rtx_insn *insn;
  2167. for (insn = get_insns (); insn; insn = conditionalize_block (insn))
  2168. continue;
  2169. }
  2170. /* This is to handle loads from the constant pool. */
  2171. static void
  2172. mcore_reorg (void)
  2173. {
  2174. /* Reset this variable. */
  2175. current_function_anonymous_args = 0;
  2176. if (optimize == 0)
  2177. return;
  2178. /* Conditionalize blocks where we can. */
  2179. conditionalize_optimization ();
  2180. /* Literal pool generation is now pushed off until the assembler. */
  2181. }
  2182. /* Return true if X is something that can be moved directly into r15. */
  2183. bool
  2184. mcore_r15_operand_p (rtx x)
  2185. {
  2186. switch (GET_CODE (x))
  2187. {
  2188. case CONST_INT:
  2189. return mcore_const_ok_for_inline (INTVAL (x));
  2190. case REG:
  2191. case SUBREG:
  2192. case MEM:
      return true;
    default:
      return false;
  2196. }
  2197. }
  2198. /* Implement SECONDARY_RELOAD_CLASS. If RCLASS contains r15, and we can't
  2199. directly move X into it, use r1-r14 as a temporary. */
  2200. enum reg_class
  2201. mcore_secondary_reload_class (enum reg_class rclass,
  2202. machine_mode mode ATTRIBUTE_UNUSED, rtx x)
  2203. {
  2204. if (TEST_HARD_REG_BIT (reg_class_contents[rclass], 15)
  2205. && !mcore_r15_operand_p (x))
  2206. return LRW_REGS;
  2207. return NO_REGS;
  2208. }
  2209. /* Return the reg_class to use when reloading the rtx X into the class
  2210. RCLASS. If X is too complex to move directly into r15, prefer to
  2211. use LRW_REGS instead. */
  2212. enum reg_class
  2213. mcore_reload_class (rtx x, enum reg_class rclass)
  2214. {
  2215. if (reg_class_subset_p (LRW_REGS, rclass) && !mcore_r15_operand_p (x))
  2216. return LRW_REGS;
  2217. return rclass;
  2218. }
  2219. /* Tell me if a pair of reg/subreg rtx's actually refer to the same
  2220. register. Note that the current version doesn't worry about whether
   they are the same mode or not (e.g., a QImode in r2 matches an HImode
   in r2 matches an SImode in r2).  Might think in the future about whether
  2223. we want to be able to say something about modes. */
  2224. int
  2225. mcore_is_same_reg (rtx x, rtx y)
  2226. {
  2227. /* Strip any and all of the subreg wrappers. */
  2228. while (GET_CODE (x) == SUBREG)
  2229. x = SUBREG_REG (x);
  2230. while (GET_CODE (y) == SUBREG)
  2231. y = SUBREG_REG (y);
  if (GET_CODE (x) == REG && GET_CODE (y) == REG && REGNO (x) == REGNO (y))
  2233. return 1;
  2234. return 0;
  2235. }
  2236. static void
  2237. mcore_option_override (void)
  2238. {
  2239. /* Only the m340 supports little endian code. */
  2240. if (TARGET_LITTLE_END && ! TARGET_M340)
  2241. target_flags |= MASK_M340;
  2242. }
  2243. /* Compute the number of word sized registers needed to
  2244. hold a function argument of mode MODE and type TYPE. */
  2245. int
  2246. mcore_num_arg_regs (machine_mode mode, const_tree type)
  2247. {
  2248. int size;
  2249. if (targetm.calls.must_pass_in_stack (mode, type))
  2250. return 0;
  2251. if (type && mode == BLKmode)
  2252. size = int_size_in_bytes (type);
  2253. else
  2254. size = GET_MODE_SIZE (mode);
  2255. return ROUND_ADVANCE (size);
  2256. }
  2257. static rtx
  2258. handle_structs_in_regs (machine_mode mode, const_tree type, int reg)
  2259. {
  2260. int size;
  2261. /* The MCore ABI defines that a structure whose size is not a whole multiple
  2262. of bytes is passed packed into registers (or spilled onto the stack if
  2263. not enough registers are available) with the last few bytes of the
  2264. structure being packed, left-justified, into the last register/stack slot.
  2265. GCC handles this correctly if the last word is in a stack slot, but we
  2266. have to generate a special, PARALLEL RTX if the last word is in an
  2267. argument register. */
  2268. if (type
  2269. && TYPE_MODE (type) == BLKmode
  2270. && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
  2271. && (size = int_size_in_bytes (type)) > UNITS_PER_WORD
  2272. && (size % UNITS_PER_WORD != 0)
  2273. && (reg + mcore_num_arg_regs (mode, type) <= (FIRST_PARM_REG + NPARM_REGS)))
  2274. {
  2275. rtx arg_regs [NPARM_REGS];
  2276. int nregs;
  2277. rtx result;
  2278. rtvec rtvec;
  2279. for (nregs = 0; size > 0; size -= UNITS_PER_WORD)
  2280. {
  2281. arg_regs [nregs] =
  2282. gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, reg ++),
  2283. GEN_INT (nregs * UNITS_PER_WORD));
  2284. nregs ++;
  2285. }
  2286. /* We assume here that NPARM_REGS == 6. The assert checks this. */
  2287. gcc_assert (ARRAY_SIZE (arg_regs) == 6);
  2288. rtvec = gen_rtvec (nregs, arg_regs[0], arg_regs[1], arg_regs[2],
  2289. arg_regs[3], arg_regs[4], arg_regs[5]);
  2290. result = gen_rtx_PARALLEL (mode, rtvec);
  2291. return result;
  2292. }
  2293. return gen_rtx_REG (mode, reg);
  2294. }
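/* For example (illustrative): a 6 byte BLKmode struct starting in the
   first argument register needs two words, and since 6 is not a multiple
   of UNITS_PER_WORD we build roughly

      (parallel [(expr_list (reg:SI 2) (const_int 0))
                 (expr_list (reg:SI 3) (const_int 4))])

   i.e. the first four bytes in r2 and the last two bytes left-justified
   in r3.  */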
  2295. rtx
  2296. mcore_function_value (const_tree valtype, const_tree func)
  2297. {
  2298. machine_mode mode;
  2299. int unsigned_p;
  2300. mode = TYPE_MODE (valtype);
  2301. /* Since we promote return types, we must promote the mode here too. */
  2302. mode = promote_function_mode (valtype, mode, &unsigned_p, func, 1);
  2303. return handle_structs_in_regs (mode, valtype, FIRST_RET_REG);
  2304. }
  2305. /* Define where to put the arguments to a function.
  2306. Value is zero to push the argument on the stack,
  2307. or a hard register in which to store the argument.
  2308. MODE is the argument's machine mode.
  2309. TYPE is the data type of the argument (as a tree).
  2310. This is null for libcalls where that information may
  2311. not be available.
  2312. CUM is a variable of type CUMULATIVE_ARGS which gives info about
  2313. the preceding args and about the function being called.
  2314. NAMED is nonzero if this argument is a named parameter
  2315. (otherwise it is an extra parameter matching an ellipsis).
  2316. On MCore the first args are normally in registers
  2317. and the rest are pushed. Any arg that starts within the first
  2318. NPARM_REGS words is at least partially passed in a register unless
  2319. its data type forbids. */
  2320. static rtx
  2321. mcore_function_arg (cumulative_args_t cum, machine_mode mode,
  2322. const_tree type, bool named)
  2323. {
  2324. int arg_reg;
  2325. if (! named || mode == VOIDmode)
  2326. return 0;
  2327. if (targetm.calls.must_pass_in_stack (mode, type))
  2328. return 0;
  2329. arg_reg = ROUND_REG (*get_cumulative_args (cum), mode);
  2330. if (arg_reg < NPARM_REGS)
  2331. return handle_structs_in_regs (mode, type, FIRST_PARM_REG + arg_reg);
  2332. return 0;
  2333. }
  2334. static void
  2335. mcore_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
  2336. const_tree type, bool named ATTRIBUTE_UNUSED)
  2337. {
  2338. CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  2339. *cum = (ROUND_REG (*cum, mode)
  2340. + (int)named * mcore_num_arg_regs (mode, type));
  2341. }
  2342. static unsigned int
  2343. mcore_function_arg_boundary (machine_mode mode,
  2344. const_tree type ATTRIBUTE_UNUSED)
  2345. {
  2346. /* Doubles must be aligned to an 8 byte boundary. */
  2347. return (mode != BLKmode && GET_MODE_SIZE (mode) == 8
  2348. ? BIGGEST_ALIGNMENT
  2349. : PARM_BOUNDARY);
  2350. }
  2351. /* Returns the number of bytes of argument registers required to hold *part*
  2352. of a parameter of machine mode MODE and type TYPE (which may be NULL if
  2353. the type is not known). If the argument fits entirely in the argument
  2354. registers, or entirely on the stack, then 0 is returned. CUM is the
  2355. number of argument registers already used by earlier parameters to
  2356. the function. */
  2357. static int
  2358. mcore_arg_partial_bytes (cumulative_args_t cum, machine_mode mode,
  2359. tree type, bool named)
  2360. {
  2361. int reg = ROUND_REG (*get_cumulative_args (cum), mode);
  2362. if (named == 0)
  2363. return 0;
  2364. if (targetm.calls.must_pass_in_stack (mode, type))
  2365. return 0;
  2366. /* REG is not the *hardware* register number of the register that holds
  2367. the argument, it is the *argument* register number. So for example,
  2368. the first argument to a function goes in argument register 0, which
  2369. translates (for the MCore) into hardware register 2. The second
  2370. argument goes into argument register 1, which translates into hardware
  2371. register 3, and so on. NPARM_REGS is the number of argument registers
  2372. supported by the target, not the maximum hardware register number of
  2373. the target. */
  2374. if (reg >= NPARM_REGS)
  2375. return 0;
  2376. /* If the argument fits entirely in registers, return 0. */
  2377. if (reg + mcore_num_arg_regs (mode, type) <= NPARM_REGS)
  2378. return 0;
  2379. /* The argument overflows the number of available argument registers.
  2380. Compute how many argument registers have not yet been assigned to
  2381. hold an argument. */
  2382. reg = NPARM_REGS - reg;
  2383. /* Return partially in registers and partially on the stack. */
  2384. return reg * UNITS_PER_WORD;
  2385. }
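/* For example, with NPARM_REGS == 6: an argument needing three words that
   starts in argument register 4 does not fit (4 + 3 > 6), so two registers
   remain for it and we return 2 * UNITS_PER_WORD == 8; the third word is
   passed on the stack.  */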
  2386. /* Return nonzero if SYMBOL is marked as being dllexport'd. */
  2387. int
  2388. mcore_dllexport_name_p (const char * symbol)
  2389. {
  2390. return symbol[0] == '@' && symbol[1] == 'e' && symbol[2] == '.';
  2391. }
  2392. /* Return nonzero if SYMBOL is marked as being dllimport'd. */
  2393. int
  2394. mcore_dllimport_name_p (const char * symbol)
  2395. {
  2396. return symbol[0] == '@' && symbol[1] == 'i' && symbol[2] == '.';
  2397. }
  2398. /* Mark a DECL as being dllexport'd. */
  2399. static void
  2400. mcore_mark_dllexport (tree decl)
  2401. {
  2402. const char * oldname;
  2403. char * newname;
  2404. rtx rtlname;
  2405. tree idp;
  2406. rtlname = XEXP (DECL_RTL (decl), 0);
  2407. if (GET_CODE (rtlname) == MEM)
  2408. rtlname = XEXP (rtlname, 0);
  2409. gcc_assert (GET_CODE (rtlname) == SYMBOL_REF);
  2410. oldname = XSTR (rtlname, 0);
  2411. if (mcore_dllexport_name_p (oldname))
  2412. return; /* Already done. */
  2413. newname = XALLOCAVEC (char, strlen (oldname) + 4);
  2414. sprintf (newname, "@e.%s", oldname);
  2415. /* We pass newname through get_identifier to ensure it has a unique
  2416. address. RTL processing can sometimes peek inside the symbol ref
  2417. and compare the string's addresses to see if two symbols are
  2418. identical. */
  2419. /* ??? At least I think that's why we do this. */
  2420. idp = get_identifier (newname);
  2421. XEXP (DECL_RTL (decl), 0) =
  2422. gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (idp));
  2423. }
  2424. /* Mark a DECL as being dllimport'd. */
  2425. static void
  2426. mcore_mark_dllimport (tree decl)
  2427. {
  2428. const char * oldname;
  2429. char * newname;
  2430. tree idp;
  2431. rtx rtlname;
  2432. rtx newrtl;
  2433. rtlname = XEXP (DECL_RTL (decl), 0);
  2434. if (GET_CODE (rtlname) == MEM)
  2435. rtlname = XEXP (rtlname, 0);
  2436. gcc_assert (GET_CODE (rtlname) == SYMBOL_REF);
  2437. oldname = XSTR (rtlname, 0);
  2438. gcc_assert (!mcore_dllexport_name_p (oldname));
  2439. if (mcore_dllimport_name_p (oldname))
  2440. return; /* Already done. */
  2441. /* ??? One can well ask why we're making these checks here,
  2442. and that would be a good question. */
  2443. /* Imported variables can't be initialized. */
  2444. if (TREE_CODE (decl) == VAR_DECL
  2445. && !DECL_VIRTUAL_P (decl)
  2446. && DECL_INITIAL (decl))
  2447. {
  2448. error ("initialized variable %q+D is marked dllimport", decl);
  2449. return;
  2450. }
  2451. /* `extern' needn't be specified with dllimport.
  2452. Specify `extern' now and hope for the best. Sigh. */
  2453. if (TREE_CODE (decl) == VAR_DECL
  2454. /* ??? Is this test for vtables needed? */
  2455. && !DECL_VIRTUAL_P (decl))
  2456. {
  2457. DECL_EXTERNAL (decl) = 1;
  2458. TREE_PUBLIC (decl) = 1;
  2459. }
  2460. newname = XALLOCAVEC (char, strlen (oldname) + 11);
  2461. sprintf (newname, "@i.__imp_%s", oldname);
  2462. /* We pass newname through get_identifier to ensure it has a unique
  2463. address. RTL processing can sometimes peek inside the symbol ref
  2464. and compare the string's addresses to see if two symbols are
  2465. identical. */
  2466. /* ??? At least I think that's why we do this. */
  2467. idp = get_identifier (newname);
  2468. newrtl = gen_rtx_MEM (Pmode,
  2469. gen_rtx_SYMBOL_REF (Pmode,
  2470. IDENTIFIER_POINTER (idp)));
  2471. XEXP (DECL_RTL (decl), 0) = newrtl;
  2472. }
  2473. static int
  2474. mcore_dllexport_p (tree decl)
  2475. {
  2476. if ( TREE_CODE (decl) != VAR_DECL
  2477. && TREE_CODE (decl) != FUNCTION_DECL)
  2478. return 0;
  2479. return lookup_attribute ("dllexport", DECL_ATTRIBUTES (decl)) != 0;
  2480. }
  2481. static int
  2482. mcore_dllimport_p (tree decl)
  2483. {
  2484. if ( TREE_CODE (decl) != VAR_DECL
  2485. && TREE_CODE (decl) != FUNCTION_DECL)
  2486. return 0;
  2487. return lookup_attribute ("dllimport", DECL_ATTRIBUTES (decl)) != 0;
  2488. }
  2489. /* We must mark dll symbols specially. Definitions of dllexport'd objects
   install some info in the .drectve (PE) or .exports (ELF) sections.  */
  2491. static void
  2492. mcore_encode_section_info (tree decl, rtx rtl ATTRIBUTE_UNUSED, int first ATTRIBUTE_UNUSED)
  2493. {
  2494. /* Mark the decl so we can tell from the rtl whether the object is
  2495. dllexport'd or dllimport'd. */
  2496. if (mcore_dllexport_p (decl))
  2497. mcore_mark_dllexport (decl);
  2498. else if (mcore_dllimport_p (decl))
  2499. mcore_mark_dllimport (decl);
  2500. /* It might be that DECL has already been marked as dllimport, but
  2501. a subsequent definition nullified that. The attribute is gone
  2502. but DECL_RTL still has @i.__imp_foo. We need to remove that. */
  2503. else if ((TREE_CODE (decl) == FUNCTION_DECL
  2504. || TREE_CODE (decl) == VAR_DECL)
  2505. && DECL_RTL (decl) != NULL_RTX
  2506. && GET_CODE (DECL_RTL (decl)) == MEM
  2507. && GET_CODE (XEXP (DECL_RTL (decl), 0)) == MEM
  2508. && GET_CODE (XEXP (XEXP (DECL_RTL (decl), 0), 0)) == SYMBOL_REF
  2509. && mcore_dllimport_name_p (XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0)))
  2510. {
  2511. const char * oldname = XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0);
  2512. tree idp = get_identifier (oldname + 9);
  2513. rtx newrtl = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (idp));
  2514. XEXP (DECL_RTL (decl), 0) = newrtl;
  2515. /* We previously set TREE_PUBLIC and DECL_EXTERNAL.
  2516. ??? We leave these alone for now. */
  2517. }
  2518. }
  2519. /* Undo the effects of the above. */
  2520. static const char *
  2521. mcore_strip_name_encoding (const char * str)
  2522. {
  2523. return str + (str[0] == '@' ? 3 : 0);
  2524. }
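/* For example, exporting "foo" renames it "@e.foo" and importing it yields
   "@i.__imp_foo"; stripping removes the three character "@x." prefix,
   giving back "foo" and "__imp_foo" respectively, while the oldname + 9 in
   mcore_encode_section_info above skips the whole "@i.__imp_" prefix when
   a stale dllimport marking has to be undone.  */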
  2525. /* MCore specific attribute support.
  2526. dllexport - for exporting a function/variable that will live in a dll
  2527. dllimport - for importing a function/variable from a dll
  2528. naked - do not create a function prologue/epilogue. */
  2529. /* Handle a "naked" attribute; arguments as in
  2530. struct attribute_spec.handler. */
  2531. static tree
  2532. mcore_handle_naked_attribute (tree * node, tree name, tree args ATTRIBUTE_UNUSED,
  2533. int flags ATTRIBUTE_UNUSED, bool * no_add_attrs)
  2534. {
  2535. if (TREE_CODE (*node) != FUNCTION_DECL)
  2536. {
  2537. warning (OPT_Wattributes, "%qE attribute only applies to functions",
  2538. name);
  2539. *no_add_attrs = true;
  2540. }
  2541. return NULL_TREE;
  2542. }
  2543. /* ??? It looks like this is PE specific? Oh well, this is what the
  2544. old code did as well. */
  2545. static void
  2546. mcore_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
  2547. {
  2548. int len;
  2549. const char * name;
  2550. char * string;
  2551. const char * prefix;
  2552. name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
  2553. /* Strip off any encoding in name. */
  2554. name = (* targetm.strip_name_encoding) (name);
  2555. /* The object is put in, for example, section .text$foo.
  2556. The linker will then ultimately place them in .text
  2557. (everything from the $ on is stripped). */
  2558. if (TREE_CODE (decl) == FUNCTION_DECL)
  2559. prefix = ".text$";
  2560. /* For compatibility with EPOC, we ignore the fact that the
  2561. section might have relocs against it. */
  2562. else if (decl_readonly_section (decl, 0))
  2563. prefix = ".rdata$";
  2564. else
  2565. prefix = ".data$";
  2566. len = strlen (name) + strlen (prefix);
  2567. string = XALLOCAVEC (char, len + 1);
  2568. sprintf (string, "%s%s", prefix, name);
  2569. set_decl_section_name (decl, string);
  2570. }
  2571. int
  2572. mcore_naked_function_p (void)
  2573. {
  2574. return lookup_attribute ("naked", DECL_ATTRIBUTES (current_function_decl)) != NULL_TREE;
  2575. }
  2576. static bool
  2577. mcore_warn_func_return (tree decl)
  2578. {
  2579. /* Naked functions are implemented entirely in assembly, including the
  2580. return sequence, so suppress warnings about this. */
  2581. return lookup_attribute ("naked", DECL_ATTRIBUTES (decl)) == NULL_TREE;
  2582. }
  2583. #ifdef OBJECT_FORMAT_ELF
  2584. static void
  2585. mcore_asm_named_section (const char *name,
  2586. unsigned int flags ATTRIBUTE_UNUSED,
  2587. tree decl ATTRIBUTE_UNUSED)
  2588. {
  2589. fprintf (asm_out_file, "\t.section %s\n", name);
  2590. }
  2591. #endif /* OBJECT_FORMAT_ELF */
  2592. /* Worker function for TARGET_ASM_EXTERNAL_LIBCALL. */
  2593. static void
  2594. mcore_external_libcall (rtx fun)
  2595. {
  2596. fprintf (asm_out_file, "\t.import\t");
  2597. assemble_name (asm_out_file, XSTR (fun, 0));
  2598. fprintf (asm_out_file, "\n");
  2599. }
  2600. /* Worker function for TARGET_RETURN_IN_MEMORY. */
  2601. static bool
  2602. mcore_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
  2603. {
  2604. const HOST_WIDE_INT size = int_size_in_bytes (type);
  2605. return (size == -1 || size > 2 * UNITS_PER_WORD);
  2606. }
  2607. /* Worker function for TARGET_ASM_TRAMPOLINE_TEMPLATE.
  2608. Output assembler code for a block containing the constant parts
  2609. of a trampoline, leaving space for the variable parts.
  2610. On the MCore, the trampoline looks like:
  2611. lrw r1, function
  2612. lrw r13, area
  2613. jmp r13
  2614. or r0, r0
  2615. .literals */
  2616. static void
  2617. mcore_asm_trampoline_template (FILE *f)
  2618. {
  2619. fprintf (f, "\t.short 0x7102\n");
  2620. fprintf (f, "\t.short 0x7d02\n");
  2621. fprintf (f, "\t.short 0x00cd\n");
  2622. fprintf (f, "\t.short 0x1e00\n");
  2623. fprintf (f, "\t.long 0\n");
  2624. fprintf (f, "\t.long 0\n");
  2625. }
  2626. /* Worker function for TARGET_TRAMPOLINE_INIT. */
  2627. static void
  2628. mcore_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
  2629. {
  2630. rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
  2631. rtx mem;
  2632. emit_block_move (m_tramp, assemble_trampoline_template (),
  2633. GEN_INT (2*UNITS_PER_WORD), BLOCK_OP_NORMAL);
  2634. mem = adjust_address (m_tramp, SImode, 8);
  2635. emit_move_insn (mem, chain_value);
  2636. mem = adjust_address (m_tramp, SImode, 12);
  2637. emit_move_insn (mem, fnaddr);
  2638. }
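/* The template above is 8 bytes of code (the four .short words) followed
   by two 32 bit literal slots; the init routine copies those 8 code bytes
   and then stores the static chain value at offset 8 and the target
   function's address at offset 12, which are the literals referenced by
   the lrw instructions in the template.  */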
  2639. /* Implement TARGET_LEGITIMATE_CONSTANT_P
  2640. On the MCore, allow anything but a double. */
  2641. static bool
  2642. mcore_legitimate_constant_p (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
  2643. {
  2644. return GET_CODE (x) != CONST_DOUBLE;
  2645. }