// MacroAssemblerX86Common.h
- /*
- * Copyright (C) 2008 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
- #ifndef MacroAssemblerX86Common_h
- #define MacroAssemblerX86Common_h
- #if ENABLE(ASSEMBLER)
- #include "X86Assembler.h"
- #include "AbstractMacroAssembler.h"
- #include "JITStubEntries.h"
- namespace JSC {
- class MacroAssemblerX86Common : public AbstractMacroAssembler<X86Assembler> {
protected:
#if CPU(X86_64)
    // Scratch register used internally by operations (e.g. loadDouble from an
    // absolute address) that need a temporary on 64-bit targets.
    static const X86Registers::RegisterID scratchRegister = X86Registers::r11;
#endif

    // Auxiliary bits OR'ed into DoubleCondition values, chosen outside the
    // range of the real x86 condition codes; see DoubleCondition below.
    static const int DoubleConditionBitInvert = 0x10;
    static const int DoubleConditionBitSpecial = 0x20;
    static const int DoubleConditionBits = DoubleConditionBitInvert | DoubleConditionBitSpecial;

public:
    typedef X86Assembler::FPRegisterID FPRegisterID;
    typedef X86Assembler::XMMRegisterID XMMRegisterID;
-
- static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
- {
- return value >= -128 && value <= 127;
- }
    // Integer comparison conditions, mapping directly onto x86 condition
    // codes: Above/Below et al. are the unsigned comparisons,
    // GreaterThan/LessThan et al. the signed ones.
    enum RelationalCondition {
        Equal = X86Assembler::ConditionE,
        NotEqual = X86Assembler::ConditionNE,
        Above = X86Assembler::ConditionA,
        AboveOrEqual = X86Assembler::ConditionAE,
        Below = X86Assembler::ConditionB,
        BelowOrEqual = X86Assembler::ConditionBE,
        GreaterThan = X86Assembler::ConditionG,
        GreaterThanOrEqual = X86Assembler::ConditionGE,
        LessThan = X86Assembler::ConditionL,
        LessThanOrEqual = X86Assembler::ConditionLE
    };
    // Conditions on the CPU flags left by the most recent arithmetic
    // operation (overflow, sign, zero tests).
    enum ResultCondition {
        Overflow = X86Assembler::ConditionO,
        Signed = X86Assembler::ConditionS,
        PositiveOrZero = X86Assembler::ConditionNS,
        Zero = X86Assembler::ConditionE,
        NonZero = X86Assembler::ConditionNE
    };
    // Floating-point comparison conditions. Each value is an x86 condition
    // code optionally augmented with DoubleConditionBitInvert (branchDouble
    // swaps the ucomisd operand order) and/or DoubleConditionBitSpecial
    // (extra parity-flag handling is needed for correct NaN semantics).
    // Both auxiliary bits are masked off before the jcc is emitted.
    enum DoubleCondition {
        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
        DoubleEqual = X86Assembler::ConditionE | DoubleConditionBitSpecial,
        DoubleNotEqual = X86Assembler::ConditionNE,
        DoubleGreaterThan = X86Assembler::ConditionA,
        DoubleGreaterThanOrEqual = X86Assembler::ConditionAE,
        DoubleLessThan = X86Assembler::ConditionA | DoubleConditionBitInvert,
        DoubleLessThanOrEqual = X86Assembler::ConditionAE | DoubleConditionBitInvert,
        // If either operand is NaN, these conditions always evaluate to true.
        DoubleEqualOrUnordered = X86Assembler::ConditionE,
        DoubleNotEqualOrUnordered = X86Assembler::ConditionNE | DoubleConditionBitSpecial,
        DoubleGreaterThanOrUnordered = X86Assembler::ConditionB | DoubleConditionBitInvert,
        DoubleGreaterThanOrEqualOrUnordered = X86Assembler::ConditionBE | DoubleConditionBitInvert,
        DoubleLessThanOrUnordered = X86Assembler::ConditionB,
        DoubleLessThanOrEqualOrUnordered = X86Assembler::ConditionBE,
    };
    // Sanity check: the auxiliary DoubleCondition bits must not overlap any
    // of the real condition codes used in the enums above.
    COMPILE_ASSERT(
        !((X86Assembler::ConditionE | X86Assembler::ConditionNE | X86Assembler::ConditionA | X86Assembler::ConditionAE | X86Assembler::ConditionB | X86Assembler::ConditionBE) & DoubleConditionBits),
        DoubleConditionBits_should_not_interfere_with_X86Assembler_Condition_codes);

    static const RegisterID stackPointerRegister = X86Registers::esp;
#if ENABLE(JIT_CONSTANT_BLINDING)
    // Constant-blinding policy for this architecture: only immediates of
    // 0x00ffffff and above are considered worth blinding.
    static bool shouldBlindForSpecificArch(uint32_t value) { return value >= 0x00ffffff; }
#if CPU(X86_64)
    static bool shouldBlindForSpecificArch(uint64_t value) { return value >= 0x00ffffff; }
#if OS(DARWIN) // On 64-bit systems other than DARWIN uint64_t and uintptr_t are the same type so overload is prohibited.
    static bool shouldBlindForSpecificArch(uintptr_t value) { return value >= 0x00ffffff; }
#endif
#endif
#endif
    // Integer arithmetic operations:
    //
    // Operations are typically two operand - operation(source, srcDst)
    // For many operations the source may be a TrustedImm32, the srcDst operand
    // may often be a memory location (explicitly described using an Address
    // object).

    // dest += src
    void add32(RegisterID src, RegisterID dest)
    {
        m_assembler.addl_rr(src, dest);
    }

    // [address] += imm
    void add32(TrustedImm32 imm, Address address)
    {
        m_assembler.addl_im(imm.m_value, address.offset, address.base);
    }

    // dest += imm
    void add32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.addl_ir(imm.m_value, dest);
    }

    // dest += [src]
    void add32(Address src, RegisterID dest)
    {
        m_assembler.addl_mr(src.offset, src.base, dest);
    }

    // [dest] += src
    void add32(RegisterID src, Address dest)
    {
        m_assembler.addl_rm(src, dest.offset, dest.base);
    }

    // dest = src + imm; emitted as LEA, so the CPU flags are not modified.
    void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.leal_mr(imm.m_value, src, dest);
    }
-
    // dest &= src
    void and32(RegisterID src, RegisterID dest)
    {
        m_assembler.andl_rr(src, dest);
    }

    // dest &= imm
    void and32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.andl_ir(imm.m_value, dest);
    }

    // [dest] &= src
    void and32(RegisterID src, Address dest)
    {
        m_assembler.andl_rm(src, dest.offset, dest.base);
    }

    // dest &= [src]
    void and32(Address src, RegisterID dest)
    {
        m_assembler.andl_mr(src.offset, src.base, dest);
    }

    // [address] &= imm
    void and32(TrustedImm32 imm, Address address)
    {
        m_assembler.andl_im(imm.m_value, address.offset, address.base);
    }

    // dest = op1 & op2
    void and32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2)
            // x & x == x; just move the value into place.
            zeroExtend32ToPtr(op1, dest);
        else if (op1 == dest)
            and32(op2, dest);
        else {
            // AND is commutative, so op2 can be moved into dest first.
            move(op2, dest);
            and32(op1, dest);
        }
    }

    // dest = src & imm
    void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(src, dest);
        and32(imm, dest);
    }
    // dest <<= shift_amount. shift_amount must not alias dest.
    void lshift32(RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);
        if (shift_amount == X86Registers::ecx)
            m_assembler.shll_CLr(dest);
        else {
            // On x86 we can only shift by ecx; if asked to shift by another register we'll
            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
            // If dest is ecx, then shift the swapped register!
            swap(shift_amount, X86Registers::ecx);
            m_assembler.shll_CLr(dest == X86Registers::ecx ? shift_amount : dest);
            swap(shift_amount, X86Registers::ecx);
        }
    }

    // dest = src << shift_amount
    void lshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);
        if (src != dest)
            move(src, dest);
        lshift32(shift_amount, dest);
    }

    // dest <<= imm
    void lshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.shll_i8r(imm.m_value, dest);
    }

    // dest = src << imm
    void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
        lshift32(imm, dest);
    }
-
    // dest *= src (32-bit signed multiply).
    void mul32(RegisterID src, RegisterID dest)
    {
        m_assembler.imull_rr(src, dest);
    }

    // dest *= [src]
    void mul32(Address src, RegisterID dest)
    {
        m_assembler.imull_mr(src.offset, src.base, dest);
    }

    // dest = src * imm
    void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.imull_i32r(src, imm.m_value, dest);
    }
    // srcDest = -srcDest (two's-complement negate).
    void neg32(RegisterID srcDest)
    {
        m_assembler.negl_r(srcDest);
    }

    // [srcDest] = -[srcDest]
    void neg32(Address srcDest)
    {
        m_assembler.negl_m(srcDest.offset, srcDest.base);
    }
    // dest |= src
    void or32(RegisterID src, RegisterID dest)
    {
        m_assembler.orl_rr(src, dest);
    }

    // dest |= imm
    void or32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.orl_ir(imm.m_value, dest);
    }

    // [dest] |= src
    void or32(RegisterID src, Address dest)
    {
        m_assembler.orl_rm(src, dest.offset, dest.base);
    }

    // dest |= [src]
    void or32(Address src, RegisterID dest)
    {
        m_assembler.orl_mr(src.offset, src.base, dest);
    }

    // [address] |= imm
    void or32(TrustedImm32 imm, Address address)
    {
        m_assembler.orl_im(imm.m_value, address.offset, address.base);
    }

    // dest = op1 | op2
    void or32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2)
            // x | x == x; just move the value into place.
            zeroExtend32ToPtr(op1, dest);
        else if (op1 == dest)
            or32(op2, dest);
        else {
            // OR is commutative, so op2 can be moved into dest first.
            move(op2, dest);
            or32(op1, dest);
        }
    }

    // dest = src | imm
    void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(src, dest);
        or32(imm, dest);
    }
    // dest >>= shift_amount (arithmetic right shift, sign-preserving).
    // shift_amount must not alias dest.
    void rshift32(RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);
        if (shift_amount == X86Registers::ecx)
            m_assembler.sarl_CLr(dest);
        else {
            // On x86 we can only shift by ecx; if asked to shift by another register we'll
            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
            // If dest is ecx, then shift the swapped register!
            swap(shift_amount, X86Registers::ecx);
            m_assembler.sarl_CLr(dest == X86Registers::ecx ? shift_amount : dest);
            swap(shift_amount, X86Registers::ecx);
        }
    }

    // dest = src >> shift_amount (arithmetic)
    void rshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);
        if (src != dest)
            move(src, dest);
        rshift32(shift_amount, dest);
    }

    // dest >>= imm (arithmetic)
    void rshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.sarl_i8r(imm.m_value, dest);
    }

    // dest = src >> imm (arithmetic)
    void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
        rshift32(imm, dest);
    }
-
    // dest >>>= shift_amount (logical right shift, zero-filling).
    // shift_amount must not alias dest.
    void urshift32(RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);
        if (shift_amount == X86Registers::ecx)
            m_assembler.shrl_CLr(dest);
        else {
            // On x86 we can only shift by ecx; if asked to shift by another register we'll
            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
            // If dest is ecx, then shift the swapped register!
            swap(shift_amount, X86Registers::ecx);
            m_assembler.shrl_CLr(dest == X86Registers::ecx ? shift_amount : dest);
            swap(shift_amount, X86Registers::ecx);
        }
    }

    // dest = src >>> shift_amount (logical)
    void urshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);
        if (src != dest)
            move(src, dest);
        urshift32(shift_amount, dest);
    }

    // dest >>>= imm (logical)
    void urshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.shrl_i8r(imm.m_value, dest);
    }

    // dest = src >>> imm (logical)
    void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
        urshift32(imm, dest);
    }
-
    // dest -= src
    void sub32(RegisterID src, RegisterID dest)
    {
        m_assembler.subl_rr(src, dest);
    }

    // dest -= imm
    void sub32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.subl_ir(imm.m_value, dest);
    }

    // [address] -= imm
    void sub32(TrustedImm32 imm, Address address)
    {
        m_assembler.subl_im(imm.m_value, address.offset, address.base);
    }

    // dest -= [src]
    void sub32(Address src, RegisterID dest)
    {
        m_assembler.subl_mr(src.offset, src.base, dest);
    }

    // [dest] -= src
    void sub32(RegisterID src, Address dest)
    {
        m_assembler.subl_rm(src, dest.offset, dest.base);
    }
    // dest ^= src
    void xor32(RegisterID src, RegisterID dest)
    {
        m_assembler.xorl_rr(src, dest);
    }

    // [dest] ^= imm; XOR with -1 is emitted as a NOT instruction.
    void xor32(TrustedImm32 imm, Address dest)
    {
        if (imm.m_value == -1)
            m_assembler.notl_m(dest.offset, dest.base);
        else
            m_assembler.xorl_im(imm.m_value, dest.offset, dest.base);
    }

    // dest ^= imm; XOR with -1 is emitted as a NOT instruction.
    void xor32(TrustedImm32 imm, RegisterID dest)
    {
        if (imm.m_value == -1)
            m_assembler.notl_r(dest);
        else
            m_assembler.xorl_ir(imm.m_value, dest);
    }

    // [dest] ^= src
    void xor32(RegisterID src, Address dest)
    {
        m_assembler.xorl_rm(src, dest.offset, dest.base);
    }

    // dest ^= [src]
    void xor32(Address src, RegisterID dest)
    {
        m_assembler.xorl_mr(src.offset, src.base, dest);
    }

    // dest = op1 ^ op2
    void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2)
            // x ^ x == 0.
            move(TrustedImm32(0), dest);
        else if (op1 == dest)
            xor32(op2, dest);
        else {
            // XOR is commutative, so op2 can be moved into dest first.
            move(op2, dest);
            xor32(op1, dest);
        }
    }

    // dest = src ^ imm
    void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(src, dest);
        xor32(imm, dest);
    }
    // dst = sqrt(src) (SSE2 SQRTSD).
    void sqrtDouble(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.sqrtsd_rr(src, dst);
    }
    // dst = fabs(src). Loads -0.0 (only the sign bit set) into dst, then
    // uses ANDNPD (dst = ~dst & src) to clear the sign bit of src.
    // src must differ from dst because dst is clobbered by the constant load.
    void absDouble(FPRegisterID src, FPRegisterID dst)
    {
        ASSERT(src != dst);
#if 0 // moved to JITStubEntries.cpp
        static const double negativeZeroConstant = -0.0;
#endif
        DEFINE_REMOTE_VAR_PTR(double, negativeZeroConstant);
        loadDouble(REMOTE_VAR_PTR(negativeZeroConstant), dst);
        m_assembler.andnpd_rr(src, dst);
    }
    // dst = -src. Loads -0.0 (only the sign bit set) into dst, then XORPD
    // with src flips the sign bit.
    // src must differ from dst because dst is clobbered by the constant load.
    void negateDouble(FPRegisterID src, FPRegisterID dst)
    {
        ASSERT(src != dst);
#if 0 // moved to JITStubEntries.cpp
        static const double negativeZeroConstant = -0.0;
#endif
        DEFINE_REMOTE_VAR_PTR(double, negativeZeroConstant);
        loadDouble(REMOTE_VAR_PTR(negativeZeroConstant), dst);
        m_assembler.xorpd_rr(src, dst);
    }
    // Memory access operations:
    //
    // Loads are of the form load(address, destination) and stores of the form
    // store(source, address). The source for a store may be a TrustedImm32. Address
    // operand objects to loads and store will be implicitly constructed if a
    // register is passed.

    // dest = [base + offset]
    void load32(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movl_mr(address.offset, address.base, dest);
    }

    // dest = [base + index * scale + offset]
    void load32(BaseIndex address, RegisterID dest)
    {
        m_assembler.movl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    // x86 supports unaligned accesses, so this is just a plain load.
    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
    {
        load32(address, dest);
    }

    // x86 supports unaligned accesses, so this is just a plain 16-bit load.
    void load16Unaligned(BaseIndex address, RegisterID dest)
    {
        load16(address, dest);
    }
    // Emits a load with a 32-bit displacement and returns a label so the
    // displacement can be repatched later.
    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movl_mr_disp32(address.offset, address.base, dest);
        return DataLabel32(this);
    }

    // As above, but with a compact 8-bit displacement; the patched value must
    // satisfy isCompactPtrAlignedAddressOffset().
    DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movl_mr_disp8(address.offset, address.base, dest);
        return DataLabelCompact(this);
    }
-
    // Rewrites the 8-bit displacement of a previously emitted compact load.
    // When DETACHED_JIT is enabled (and we are not building the detached JIT
    // itself) only the declaration appears here; the definition lives elsewhere.
    static void repatchCompact(CodeLocationDataLabelCompact dataLabelCompact, int32_t value)
#if ENABLE(DETACHED_JIT) && !BUILDING_DETACHED_JIT
    ;
#else
    {
        ASSERT(isCompactPtrAlignedAddressOffset(value));
        AssemblerType_T::repatchCompact(dataLabelCompact.dataLocation(), value);
    }
#endif
    // Emits a load with a patchable compact (8-bit) displacement and returns
    // its label; same encoding as load32WithCompactAddressOffsetPatch.
    DataLabelCompact loadCompactWithAddressOffsetPatch(Address address, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movl_mr_disp8(address.offset, address.base, dest);
        return DataLabelCompact(this);
    }
    // Zero-extending byte load (MOVZBL).
    void load8(BaseIndex address, RegisterID dest)
    {
        m_assembler.movzbl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load8(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movzbl_mr(address.offset, address.base, dest);
    }

    // Sign-extending byte load (MOVSBL).
    void load8Signed(BaseIndex address, RegisterID dest)
    {
        m_assembler.movsbl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load8Signed(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movsbl_mr(address.offset, address.base, dest);
    }

    // Zero-extending 16-bit load (MOVZWL).
    void load16(BaseIndex address, RegisterID dest)
    {
        m_assembler.movzwl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load16(Address address, RegisterID dest)
    {
        m_assembler.movzwl_mr(address.offset, address.base, dest);
    }

    // Sign-extending 16-bit load (MOVSWL).
    void load16Signed(BaseIndex address, RegisterID dest)
    {
        m_assembler.movswl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load16Signed(Address address, RegisterID dest)
    {
        m_assembler.movswl_mr(address.offset, address.base, dest);
    }
    // Emits a store with a patchable 32-bit displacement and returns its label.
    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
    {
        padBeforePatch();
        m_assembler.movl_rm_disp32(src, address.offset, address.base);
        return DataLabel32(this);
    }

    // [base + offset] = src
    void store32(RegisterID src, ImplicitAddress address)
    {
        m_assembler.movl_rm(src, address.offset, address.base);
    }

    // [base + index * scale + offset] = src
    void store32(RegisterID src, BaseIndex address)
    {
        m_assembler.movl_rm(src, address.offset, address.base, address.index, address.scale);
    }

    // [base + offset] = imm
    void store32(TrustedImm32 imm, ImplicitAddress address)
    {
        m_assembler.movl_i32m(imm.m_value, address.offset, address.base);
    }

    // [base + index * scale + offset] = imm
    void store32(TrustedImm32 imm, BaseIndex address)
    {
        m_assembler.movl_i32m(imm.m_value, address.offset, address.base, address.index, address.scale);
    }
    // Store a single byte; imm must fit in a signed 8-bit value.
    void store8(TrustedImm32 imm, Address address)
    {
        ASSERT(-128 <= imm.m_value && imm.m_value < 128);
        m_assembler.movb_i8m(imm.m_value, address.offset, address.base);
    }

    void store8(TrustedImm32 imm, BaseIndex address)
    {
        ASSERT(-128 <= imm.m_value && imm.m_value < 128);
        m_assembler.movb_i8m(imm.m_value, address.offset, address.base, address.index, address.scale);
    }
-
    // [base + index * scale + offset] = low byte of src.
    void store8(RegisterID src, BaseIndex address)
    {
#if CPU(X86)
        // On 32-bit x86 we can only store from the first 4 registers;
        // esp..edi are mapped to the 'h' registers!
        if (src >= 4) {
            // Pick a temporary register from eax/ebx/ecx that is not used by
            // the address operand.
            RegisterID temp;
            if (address.base != X86Registers::eax && address.index != X86Registers::eax)
                temp = X86Registers::eax;
            else if (address.base != X86Registers::ebx && address.index != X86Registers::ebx)
                temp = X86Registers::ebx;
            else {
                ASSERT(address.base != X86Registers::ecx && address.index != X86Registers::ecx);
                temp = X86Registers::ecx;
            }

            // Swap to the temporary register to perform the store.
            swap(src, temp);
            m_assembler.movb_rm(temp, address.offset, address.base, address.index, address.scale);
            swap(src, temp);
            return;
        }
#endif
        m_assembler.movb_rm(src, address.offset, address.base, address.index, address.scale);
    }
    // [base + index * scale + offset] = low 16 bits of src.
    void store16(RegisterID src, BaseIndex address)
    {
#if CPU(X86)
        // On 32-bit x86 we can only store from the first 4 registers;
        // esp..edi are mapped to the 'h' registers!
        if (src >= 4) {
            // Pick a temporary register from eax/ebx/ecx that is not used by
            // the address operand.
            RegisterID temp;
            if (address.base != X86Registers::eax && address.index != X86Registers::eax)
                temp = X86Registers::eax;
            else if (address.base != X86Registers::ebx && address.index != X86Registers::ebx)
                temp = X86Registers::ebx;
            else {
                ASSERT(address.base != X86Registers::ecx && address.index != X86Registers::ecx);
                temp = X86Registers::ecx;
            }

            // Swap to the temporary register to perform the store.
            swap(src, temp);
            m_assembler.movw_rm(temp, address.offset, address.base, address.index, address.scale);
            swap(src, temp);
            return;
        }
#endif
        m_assembler.movw_rm(src, address.offset, address.base, address.index, address.scale);
    }
    // Floating-point operation:
    //
    // Presently only supports SSE, not x87 floating point.

    // dest = src (no instruction emitted when they are the same register).
    void moveDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        if (src != dest)
            m_assembler.movsd_rr(src, dest);
    }
    // dest = *address (double at an absolute address). On x86-64 the 64-bit
    // address is first materialized in scratchRegister, which is clobbered.
    void loadDouble(const void* address, FPRegisterID dest)
    {
#if CPU(X86)
        ASSERT(isSSE2Present());
        m_assembler.movsd_mr(address, dest);
#else
        move(TrustedImmPtr(address), scratchRegister);
        loadDouble(scratchRegister, dest);
#endif
    }
    // dest = [base + offset] (double)
    void loadDouble(ImplicitAddress address, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_mr(address.offset, address.base, dest);
    }

    // dest = [base + index * scale + offset] (double)
    void loadDouble(BaseIndex address, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    // dest = [base + index * scale + offset] (single-precision float)
    void loadFloat(BaseIndex address, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.movss_mr(address.offset, address.base, address.index, address.scale, dest);
    }
    // [base + offset] = src (double)
    void storeDouble(FPRegisterID src, ImplicitAddress address)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_rm(src, address.offset, address.base);
    }

    // [base + index * scale + offset] = src (double)
    void storeDouble(FPRegisterID src, BaseIndex address)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_rm(src, address.offset, address.base, address.index, address.scale);
    }

    // [base + index * scale + offset] = src (single-precision float)
    void storeFloat(FPRegisterID src, BaseIndex address)
    {
        ASSERT(isSSE2Present());
        m_assembler.movss_rm(src, address.offset, address.base, address.index, address.scale);
    }
-
    // dst = (float)src (CVTSD2SS)
    void convertDoubleToFloat(FPRegisterID src, FPRegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtsd2ss_rr(src, dst);
    }

    // dst = (double)src (CVTSS2SD)
    void convertFloatToDouble(FPRegisterID src, FPRegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtss2sd_rr(src, dst);
    }
    // dest += src
    void addDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.addsd_rr(src, dest);
    }

    // dest = op1 + op2; addition is commutative, so either operand may be
    // moved into dest first.
    void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        if (op1 == dest)
            addDouble(op2, dest);
        else {
            moveDouble(op2, dest);
            addDouble(op1, dest);
        }
    }

    // dest += [src]
    void addDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.addsd_mr(src.offset, src.base, dest);
    }
    // dest /= src
    void divDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.divsd_rr(src, dest);
    }

    // dest = op1 / op2. Division is not commutative, so op2 must not alias
    // dest (unless op1 does too).
    void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        // B := A / B is invalid.
        ASSERT(op1 == dest || op2 != dest);
        moveDouble(op1, dest);
        divDouble(op2, dest);
    }

    // dest /= [src]
    void divDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.divsd_mr(src.offset, src.base, dest);
    }
    // dest -= src
    void subDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.subsd_rr(src, dest);
    }

    // dest = op1 - op2. Subtraction is not commutative, so op2 must not
    // alias dest (unless op1 does too).
    void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        // B := A - B is invalid.
        ASSERT(op1 == dest || op2 != dest);
        moveDouble(op1, dest);
        subDouble(op2, dest);
    }

    // dest -= [src]
    void subDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.subsd_mr(src.offset, src.base, dest);
    }
    // dest *= src
    void mulDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.mulsd_rr(src, dest);
    }

    // dest = op1 * op2; multiplication is commutative, so either operand may
    // be moved into dest first.
    void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        if (op1 == dest)
            mulDouble(op2, dest);
        else {
            moveDouble(op2, dest);
            mulDouble(op1, dest);
        }
    }

    // dest *= [src]
    void mulDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.mulsd_mr(src.offset, src.base, dest);
    }
    // dest = (double)src (CVTSI2SD)
    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtsi2sd_rr(src, dest);
    }

    // dest = (double)[src]
    void convertInt32ToDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtsi2sd_mr(src.offset, src.base, dest);
    }
    // Compares left and right with UCOMISD and returns a Jump taken when
    // 'cond' holds. DoubleConditionBitInvert selects the operand order;
    // DoubleEqual and DoubleNotEqualOrUnordered need extra parity-flag
    // checks because UCOMISD sets PF on an unordered (NaN) comparison.
    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        ASSERT(isSSE2Present());

        if (cond & DoubleConditionBitInvert)
            m_assembler.ucomisd_rr(left, right);
        else
            m_assembler.ucomisd_rr(right, left);

        if (cond == DoubleEqual) {
            // A self-comparison is equal unless the value is NaN, so the
            // parity check alone suffices.
            if (left == right)
                return Jump(m_assembler.jnp());
            // Ordered-and-equal: skip the je when unordered.
            Jump isUnordered(m_assembler.jp());
            Jump result = Jump(m_assembler.je());
            isUnordered.link(this);
            return result;
        } else if (cond == DoubleNotEqualOrUnordered) {
            // A self-comparison is unequal only when the value is NaN.
            if (left == right)
                return Jump(m_assembler.jp());
            // Taken when unordered, or when ordered and not equal.
            Jump isUnordered(m_assembler.jp());
            Jump isEqual(m_assembler.je());
            isUnordered.link(this);
            Jump result = jump();
            isEqual.link(this);
            return result;
        }

        // All other conditions map directly onto a single jcc once the
        // auxiliary bits are masked off.
        ASSERT(!(cond & DoubleConditionBitSpecial));
        return Jump(m_assembler.jCC(static_cast<X86Assembler::Condition>(cond & ~DoubleConditionBits)));
    }
    // Truncates 'src' to an integer, and places the resulting 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, INT_MIN).
    enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };

    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);
        // CVTTSD2SI produces 0x80000000 (the "integer indefinite" value) when
        // the input cannot be represented, so compare against that sentinel.
        return branch32(branchType ? NotEqual : Equal, dest, TrustedImm32(0x80000000));
    }

    // As above, but for an unsigned result: any negative truncation result
    // (which includes the 0x80000000 failure sentinel) counts as failure.
    Jump branchTruncateDoubleToUint32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);
        return branch32(branchType ? GreaterThanOrEqual : LessThan, dest, TrustedImm32(0));
    }
    // dest = (int32_t)src, truncating toward zero. No branch is emitted;
    // out-of-range input yields CVTTSD2SI's 0x80000000 sentinel, which
    // callers must handle themselves.
    void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);
    }

#if CPU(X86_64)
    // Truncates via the 64-bit CVTTSD2SI form, so values above INT32_MAX but
    // within uint32 range convert without hitting the 32-bit sentinel.
    void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2siq_rr(src, dest);
    }
#endif
-
    // Convert 'src' to an integer, and places the resulting 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, 0).
    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp, bool negZeroCheck = true)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);

        // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
        if (negZeroCheck)
            failureCases.append(branchTest32(Zero, dest));

        // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
        convertInt32ToDouble(dest, fpTemp);
        m_assembler.ucomisd_rr(fpTemp, src);
        failureCases.append(m_assembler.jp());  // Unordered: src was NaN.
        failureCases.append(m_assembler.jne()); // Round-trip mismatch: the truncation was lossy.
    }
    // Branch taken when reg != 0.0 (ordered comparison: NaN does not branch).
    // scratch is clobbered (zeroed) to provide the comparison operand.
    Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch)
    {
        ASSERT(isSSE2Present());
        m_assembler.xorpd_rr(scratch, scratch);
        return branchDouble(DoubleNotEqual, reg, scratch);
    }

    // Branch taken when reg == 0.0 or reg is NaN. scratch is clobbered.
    Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID scratch)
    {
        ASSERT(isSSE2Present());
        m_assembler.xorpd_rr(scratch, scratch);
        return branchDouble(DoubleEqualOrUnordered, reg, scratch);
    }
    // reg <<= imm (64-bit lane shift, PSLLQ).
    void lshiftPacked(TrustedImm32 imm, XMMRegisterID reg)
    {
        ASSERT(isSSE2Present());
        m_assembler.psllq_i8r(imm.m_value, reg);
    }

    // reg >>>= imm (64-bit lane logical shift, PSRLQ).
    void rshiftPacked(TrustedImm32 imm, XMMRegisterID reg)
    {
        ASSERT(isSSE2Present());
        m_assembler.psrlq_i8r(imm.m_value, reg);
    }

    // dst |= src (bitwise OR of the XMM registers, POR).
    void orPacked(XMMRegisterID src, XMMRegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.por_rr(src, dst);
    }

    // Move a 32-bit GPR value into an XMM register (MOVD).
    void moveInt32ToPacked(RegisterID src, XMMRegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.movd_rr(src, dst);
    }

    // Move the low 32 bits of an XMM register into a GPR (MOVD).
    void movePackedToInt32(XMMRegisterID src, RegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.movd_rr(src, dst);
    }
- // Stack manipulation operations:
- //
- // The ABI is assumed to provide a stack abstraction to memory,
- // containing machine word sized units of data. Push and pop
- // operations add and remove a single register sized unit of data
- // to or from the stack. Peek and poke operations read or write
- // values on the stack, without moving the current stack position.
-
    // Pop one machine-word from the stack into 'dest'.
    void pop(RegisterID dest)
    {
        m_assembler.pop_r(dest);
    }
    // Push the contents of 'src' onto the stack.
    void push(RegisterID src)
    {
        m_assembler.push_r(src);
    }
    // Push the machine-word at 'address' onto the stack.
    void push(Address address)
    {
        m_assembler.push_m(address.offset, address.base);
    }
    // Push the immediate 'imm' onto the stack.
    void push(TrustedImm32 imm)
    {
        m_assembler.push_i32(imm.m_value);
    }
- // Register move operations:
- //
- // Move values in registers.
    // Load the immediate 'imm' into 'dest'.
    // Note: on 64-bit the TrustedImm32 value is zero extended into the register, it
    // may be useful to have a separate version that sign extends the value?
    void move(TrustedImm32 imm, RegisterID dest)
    {
        // Zero is materialized with the shorter xor encoding. NOTE(review):
        // unlike mov, xor clobbers EFLAGS - callers must not rely on condition
        // flags being preserved across move().
        if (!imm.m_value)
            m_assembler.xorl_rr(dest, dest);
        else
            m_assembler.movl_i32r(imm.m_value, dest);
    }
- #if CPU(X86_64)
    // Copy 'src' to 'dest' (no-op when they are the same register).
    void move(RegisterID src, RegisterID dest)
    {
        // Note: on 64-bit this is a full register move; perhaps it would be
        // useful to have separate move32 & movePtr, with move32 zero extending?
        if (src != dest)
            m_assembler.movq_rr(src, dest);
    }
- void move(TrustedImmPtr imm, RegisterID dest)
- {
- m_assembler.movq_i64r(imm.asIntptr(), dest);
- }
- void move(TrustedImm64 imm, RegisterID dest)
- {
- m_assembler.movq_i64r(imm.m_value, dest);
- }
- void swap(RegisterID reg1, RegisterID reg2)
- {
- if (reg1 != reg2)
- m_assembler.xchgq_rr(reg1, reg2);
- }
- void signExtend32ToPtr(RegisterID src, RegisterID dest)
- {
- m_assembler.movsxd_rr(src, dest);
- }
- void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
- {
- m_assembler.movl_rr(src, dest);
- }
- #else
- void move(RegisterID src, RegisterID dest)
- {
- if (src != dest)
- m_assembler.movl_rr(src, dest);
- }
- void move(TrustedImmPtr imm, RegisterID dest)
- {
- m_assembler.movl_i32r(imm.asIntptr(), dest);
- }
- void swap(RegisterID reg1, RegisterID reg2)
- {
- if (reg1 != reg2)
- m_assembler.xchgl_rr(reg1, reg2);
- }
- void signExtend32ToPtr(RegisterID src, RegisterID dest)
- {
- move(src, dest);
- }
- void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
- {
- move(src, dest);
- }
- #endif
    // Forwards / external control flow operations:
    //
    // This set of jump and conditional branch operations return a Jump
    // object which may be linked at a later point, allowing forward jumps,
    // or jumps that will require external linkage (after the code has been
    // relocated).
    //
    // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
    // respectively; for unsigned comparisons the names b, a, be, and ae are
    // used (representing the names 'below' and 'above').
    //
    // Operands to the comparison are provided in the expected order, e.g.
    // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
    // treated as a signed 32bit value, is less than or equal to 5.
    //
    // jz and jnz test whether the first operand is equal to zero, and take
    // an optional second operand of a mask under which to perform the test.
- public:
- Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
- {
- m_assembler.cmpb_im(right.m_value, left.offset, left.base);
- return Jump(m_assembler.jCC(x86Condition(cond)));
- }
- Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
- {
- m_assembler.cmpl_rr(right, left);
- return Jump(m_assembler.jCC(x86Condition(cond)));
- }
- Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
- {
- if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
- m_assembler.testl_rr(left, left);
- else
- m_assembler.cmpl_ir(right.m_value, left);
- return Jump(m_assembler.jCC(x86Condition(cond)));
- }
-
- Jump branch32(RelationalCondition cond, RegisterID left, Address right)
- {
- m_assembler.cmpl_mr(right.offset, right.base, left);
- return Jump(m_assembler.jCC(x86Condition(cond)));
- }
-
- Jump branch32(RelationalCondition cond, Address left, RegisterID right)
- {
- m_assembler.cmpl_rm(right, left.offset, left.base);
- return Jump(m_assembler.jCC(x86Condition(cond)));
- }
- Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
- {
- m_assembler.cmpl_im(right.m_value, left.offset, left.base);
- return Jump(m_assembler.jCC(x86Condition(cond)));
- }
- Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
- {
- m_assembler.cmpl_im(right.m_value, left.offset, left.base, left.index, left.scale);
- return Jump(m_assembler.jCC(x86Condition(cond)));
- }
- Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
- {
- return branch32(cond, left, right);
- }
- Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
- {
- m_assembler.testl_rr(reg, mask);
- return Jump(m_assembler.jCC(x86Condition(cond)));
- }
    // Branch on (reg & mask); the default mask of -1 tests the whole register
    // against itself.
    Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        // NOTE(review): a previous comment suggested a testb shortcut for masks
        // confined to the low bits; no such shortcut is implemented - this
        // always emits a 32-bit test.
        if (mask.m_value == -1)
            m_assembler.testl_rr(reg, reg);
        else
            m_assembler.testl_i32r(mask.m_value, reg);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }
- Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
- {
- if (mask.m_value == -1)
- m_assembler.cmpl_im(0, address.offset, address.base);
- else
- m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
- return Jump(m_assembler.jCC(x86Condition(cond)));
- }
- Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
- {
- if (mask.m_value == -1)
- m_assembler.cmpl_im(0, address.offset, address.base, address.index, address.scale);
- else
- m_assembler.testl_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
- return Jump(m_assembler.jCC(x86Condition(cond)));
- }
-
    // Branch on (byte at 'address' & mask); the default mask of -1 tests the
    // whole byte for zero/non-zero via a compare against 0.
    Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        // Byte in TrustedImm32 is not well defined, so be a little permissive here, but don't accept nonsense values.
        ASSERT(mask.m_value >= -128 && mask.m_value <= 255);
        if (mask.m_value == -1)
            m_assembler.cmpb_im(0, address.offset, address.base);
        else
            m_assembler.testb_im(mask.m_value, address.offset, address.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }
-
    // As branchTest8(Address) above, but with a base + scaled-index address.
    Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        // Byte in TrustedImm32 is not well defined, so be a little permissive here, but don't accept nonsense values.
        ASSERT(mask.m_value >= -128 && mask.m_value <= 255);
        if (mask.m_value == -1)
            m_assembler.cmpb_im(0, address.offset, address.base, address.index, address.scale);
        else
            m_assembler.testb_im(mask.m_value, address.offset, address.base, address.index, address.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }
- Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
- {
- ASSERT(!(right.m_value & 0xFFFFFF00));
- m_assembler.cmpb_im(right.m_value, left.offset, left.base, left.index, left.scale);
- return Jump(m_assembler.jCC(x86Condition(cond)));
- }
    // Emit an unconditional jump, returned as a linkable Jump.
    Jump jump()
    {
        return Jump(m_assembler.jmp());
    }
    // Indirect jump to the address held in 'target'.
    void jump(RegisterID target)
    {
        m_assembler.jmp_r(target);
    }
    // Address is a memory location containing the address to jump to
    // (i.e. an indirect jump through memory).
    void jump(Address address)
    {
        m_assembler.jmp_m(address.offset, address.base);
    }
- // Arithmetic control flow operations:
- //
- // This set of conditional branch operations branch based
- // on the result of an arithmetic operation. The operation
- // is performed as normal, storing the result.
- //
- // * jz operations branch if the result is zero.
- // * jo operations branch if the (signed) arithmetic
- // operation caused an overflow to occur.
-
- Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
- {
- add32(src, dest);
- return Jump(m_assembler.jCC(x86Condition(cond)));
- }
- Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
- {
- add32(imm, dest);
- return Jump(m_assembler.jCC(x86Condition(cond)));
- }
-
- Jump branchAdd32(ResultCondition cond, TrustedImm32 src, Address dest)
- {
- add32(src, dest);
- return Jump(m_assembler.jCC(x86Condition(cond)));
- }
- Jump branchAdd32(ResultCondition cond, RegisterID src, Address dest)
- {
- add32(src, dest);
- return Jump(m_assembler.jCC(x86Condition(cond)));
- }
- Jump branchAdd32(ResultCondition cond, Address src, RegisterID dest)
- {
- add32(src, dest);
- return Jump(m_assembler.jCC(x86Condition(cond)));
- }
    // dest := src1 + src2, branching on 'cond'.
    Jump branchAdd32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        // Aliasing: if src1 is already dest, just add src2 in place. Otherwise
        // copy src2 into dest (a no-op when src2 == dest) and add src1; addition
        // is commutative, so either order gives the same result.
        if (src1 == dest)
            return branchAdd32(cond, src2, dest);
        move(src2, dest);
        return branchAdd32(cond, src1, dest);
    }
- Jump branchAdd32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
- {
- move(src, dest);
- return branchAdd32(cond, imm, dest);
- }
- Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
- {
- mul32(src, dest);
- if (cond != Overflow)
- m_assembler.testl_rr(dest, dest);
- return Jump(m_assembler.jCC(x86Condition(cond)));
- }
- Jump branchMul32(ResultCondition cond, Address src, RegisterID dest)
- {
- mul32(src, dest);
- if (cond != Overflow)
- m_assembler.testl_rr(dest, dest);
- return Jump(m_assembler.jCC(x86Condition(cond)));
- }
-
- Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
- {
- mul32(imm, src, dest);
- if (cond != Overflow)
- m_assembler.testl_rr(dest, dest);
- return Jump(m_assembler.jCC(x86Condition(cond)));
- }
-
- Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
- {
- if (src1 == dest)
- return branchMul32(cond, src2, dest);
- move(src2, dest);
- return branchMul32(cond, src1, dest);
- }
- Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
- {
- sub32(src, dest);
- return Jump(m_assembler.jCC(x86Condition(cond)));
- }
-
- Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
- {
- sub32(imm, dest);
- return Jump(m_assembler.jCC(x86Condition(cond)));
- }
- Jump branchSub32(ResultCondition cond, TrustedImm32 imm, Address dest)
- {
- sub32(imm, dest);
- return Jump(m_assembler.jCC(x86Condition(cond)));
- }
- Jump branchSub32(ResultCondition cond, RegisterID src, Address dest)
- {
- sub32(src, dest);
- return Jump(m_assembler.jCC(x86Condition(cond)));
- }
- Jump branchSub32(ResultCondition cond, Address src, RegisterID dest)
- {
- sub32(src, dest);
- return Jump(m_assembler.jCC(x86Condition(cond)));
- }
    // dest := src1 - src2, branching on 'cond'.
    Jump branchSub32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        // B := A - B is invalid: the move below would overwrite src2 with src1
        // before src2 is subtracted.
        ASSERT(src1 == dest || src2 != dest);
        move(src1, dest);
        return branchSub32(cond, src2, dest);
    }
- Jump branchSub32(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest)
- {
- move(src1, dest);
- return branchSub32(cond, src2, dest);
- }
- Jump branchNeg32(ResultCondition cond, RegisterID srcDest)
- {
- neg32(srcDest);
- return Jump(m_assembler.jCC(x86Condition(cond)));
- }
- Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
- {
- or32(src, dest);
- return Jump(m_assembler.jCC(x86Condition(cond)));
- }
- // Miscellaneous operations:
    // Emit a software breakpoint trap.
    void breakpoint()
    {
#if OS(ORBIS)
        // NOTE(review): this platform uses 'int 0x41' in place of int3 -
        // presumably int3 is reserved there; confirm against platform docs.
        m_assembler.int_imm(0x41);
#else
        m_assembler.int3();
#endif
    }
- Call nearCall()
- {
- return Call(m_assembler.call(), Call::LinkableNear);
- }
- Call call(RegisterID target)
- {
- return Call(m_assembler.call(target), Call::None);
- }
- void call(Address address)
- {
- m_assembler.call_m(address.offset, address.base);
- }
- void ret()
- {
- m_assembler.ret();
- }
- void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
- {
- m_assembler.cmpb_im(right.m_value, left.offset, left.base);
- set32(x86Condition(cond), dest);
- }
-
- void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
- {
- m_assembler.cmpl_rr(right, left);
- set32(x86Condition(cond), dest);
- }
- void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
- {
- if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
- m_assembler.testl_rr(left, left);
- else
- m_assembler.cmpl_ir(right.m_value, left);
- set32(x86Condition(cond), dest);
- }
- // FIXME:
- // The mask should be optional... perhaps the argument order should be
- // dest-src, operations always have a dest? ... possibly not true, considering
- // asm ops like test, or pseudo ops like pop().
- void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
- {
- if (mask.m_value == -1)
- m_assembler.cmpb_im(0, address.offset, address.base);
- else
- m_assembler.testb_im(mask.m_value, address.offset, address.base);
- set32(x86Condition(cond), dest);
- }
- void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
- {
- if (mask.m_value == -1)
- m_assembler.cmpl_im(0, address.offset, address.base);
- else
- m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
- set32(x86Condition(cond), dest);
- }
    // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
    // Relies on the x86 condition-code encoding, in which flipping the low bit
    // of a condition code yields its logical negation (RelationalCondition
    // values map directly onto X86Assembler::Condition - see x86Condition()).
    static RelationalCondition invert(RelationalCondition cond)
    {
        return static_cast<RelationalCondition>(cond ^ 1);
    }
- void nop()
- {
- m_assembler.nop();
- }
- static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
- #if ENABLE(DETACHED_JIT) && !BUILDING_DETACHED_JIT
- ;
- #else
- {
- X86Assembler::replaceWithJump(instructionStart.executableAddress(), destination.executableAddress());
- }
- #endif
- static ptrdiff_t maxJumpReplacementSize()
- {
- return X86Assembler::maxJumpReplacementSize();
- }
- protected:
- X86Assembler::Condition x86Condition(RelationalCondition cond)
- {
- return static_cast<X86Assembler::Condition>(cond);
- }
- X86Assembler::Condition x86Condition(ResultCondition cond)
- {
- return static_cast<X86Assembler::Condition>(cond);
- }
    // Materialize condition 'cond' into 'dest' as 0 or 1 (setCC followed by a
    // zero-extension of the result byte).
    void set32(X86Assembler::Condition cond, RegisterID dest)
    {
#if CPU(X86)
        // On 32-bit x86 we can only set the first 4 registers;
        // esp..edi are mapped to the 'h' registers!
        // Work around this by temporarily swapping dest with eax, setting the
        // condition into eax, then swapping back.
        if (dest >= 4) {
            m_assembler.xchgl_rr(dest, X86Registers::eax);
            m_assembler.setCC_r(cond, X86Registers::eax);
            m_assembler.movzbl_rr(X86Registers::eax, X86Registers::eax);
            m_assembler.xchgl_rr(dest, X86Registers::eax);
            return;
        }
#endif
        m_assembler.setCC_r(cond, dest);
        m_assembler.movzbl_rr(dest, dest);
    }
- private:
- // Only MacroAssemblerX86 should be using the following method; SSE2 is always available on
- // x86_64, and clients & subclasses of MacroAssembler should be using 'supportsFloatingPoint()'.
- friend class MacroAssemblerX86;
- #if CPU(X86)
- #if OS(MAC_OS_X)
- // All X86 Macs are guaranteed to support at least SSE2,
- static bool isSSE2Present()
- {
- return true;
- }
- #else // OS(MAC_OS_X)
- enum SSE2CheckState {
- NotCheckedSSE2,
- HasSSE2,
- NoSSE2
- };
    // Detect SSE2 support via cpuid, caching the answer in s_sse2CheckState so
    // the (relatively expensive) cpuid runs at most once.
    static bool isSSE2Present()
    {
        if (s_sse2CheckState == NotCheckedSSE2) {
            // Default the flags value to zero; if the compiler is
            // not MSVC or GCC we will read this as SSE2 not present.
            int flags = 0;
#if COMPILER(MSVC)
            _asm {
                mov eax, 1 // cpuid function 1 gives us the standard feature set
                cpuid;
                mov flags, edx;
            }
#elif COMPILER(GCC)
            // ebx is explicitly saved/restored rather than clobber-listed -
            // presumably because ebx can be the PIC register on x86; confirm.
            asm (
                "movl $0x1, %%eax;"
                "pushl %%ebx;"
                "cpuid;"
                "popl %%ebx;"
                "movl %%edx, %0;"
                : "=g" (flags)
                :
                : "%eax", "%ecx", "%edx"
            );
#endif
            // cpuid leaf 1: EDX bit 26 indicates SSE2 support.
            static const int SSE2FeatureBit = 1 << 26;
            s_sse2CheckState = (flags & SSE2FeatureBit) ? HasSSE2 : NoSSE2;
        }
        // Only check once.
        ASSERT(s_sse2CheckState != NotCheckedSSE2);
        return s_sse2CheckState == HasSSE2;
    }
-
- static SSE2CheckState s_sse2CheckState;
- #endif // OS(MAC_OS_X)
- #elif !defined(NDEBUG) // CPU(X86)
- // On x86-64 we should never be checking for SSE2 in a non-debug build,
- // but non debug add this method to keep the asserts above happy.
- static bool isSSE2Present()
- {
- return true;
- }
- #endif
- };
- } // namespace JSC
- #endif // ENABLE(ASSEMBLER)
- #endif // MacroAssemblerX86Common_h
|