/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef MacroAssemblerX86Common_h
#define MacroAssemblerX86Common_h

#if ENABLE(ASSEMBLER)

#include "X86Assembler.h"
#include "AbstractMacroAssembler.h"
#include "JITStubEntries.h"

namespace JSC {

class MacroAssemblerX86Common : public AbstractMacroAssembler<X86Assembler> {
protected:
#if CPU(X86_64)
    static const X86Registers::RegisterID scratchRegister = X86Registers::r11;
#endif

    static const int DoubleConditionBitInvert = 0x10;
    static const int DoubleConditionBitSpecial = 0x20;
    static const int DoubleConditionBits = DoubleConditionBitInvert | DoubleConditionBitSpecial;

public:
    typedef X86Assembler::FPRegisterID FPRegisterID;
    typedef X86Assembler::XMMRegisterID XMMRegisterID;

    static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
    {
        return value >= -128 && value <= 127;
    }

    enum RelationalCondition {
        Equal = X86Assembler::ConditionE,
        NotEqual = X86Assembler::ConditionNE,
        Above = X86Assembler::ConditionA,
        AboveOrEqual = X86Assembler::ConditionAE,
        Below = X86Assembler::ConditionB,
        BelowOrEqual = X86Assembler::ConditionBE,
        GreaterThan = X86Assembler::ConditionG,
        GreaterThanOrEqual = X86Assembler::ConditionGE,
        LessThan = X86Assembler::ConditionL,
        LessThanOrEqual = X86Assembler::ConditionLE
    };

    enum ResultCondition {
        Overflow = X86Assembler::ConditionO,
        Signed = X86Assembler::ConditionS,
        PositiveOrZero = X86Assembler::ConditionNS,
        Zero = X86Assembler::ConditionE,
        NonZero = X86Assembler::ConditionNE
    };

    enum DoubleCondition {
        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
        DoubleEqual = X86Assembler::ConditionE | DoubleConditionBitSpecial,
        DoubleNotEqual = X86Assembler::ConditionNE,
        DoubleGreaterThan = X86Assembler::ConditionA,
        DoubleGreaterThanOrEqual = X86Assembler::ConditionAE,
        DoubleLessThan = X86Assembler::ConditionA | DoubleConditionBitInvert,
        DoubleLessThanOrEqual = X86Assembler::ConditionAE | DoubleConditionBitInvert,
        // If either operand is NaN, these conditions always evaluate to true.
        DoubleEqualOrUnordered = X86Assembler::ConditionE,
        DoubleNotEqualOrUnordered = X86Assembler::ConditionNE | DoubleConditionBitSpecial,
        DoubleGreaterThanOrUnordered = X86Assembler::ConditionB | DoubleConditionBitInvert,
        DoubleGreaterThanOrEqualOrUnordered = X86Assembler::ConditionBE | DoubleConditionBitInvert,
        DoubleLessThanOrUnordered = X86Assembler::ConditionB,
        DoubleLessThanOrEqualOrUnordered = X86Assembler::ConditionBE,
    };
    COMPILE_ASSERT(
        !((X86Assembler::ConditionE | X86Assembler::ConditionNE | X86Assembler::ConditionA | X86Assembler::ConditionAE | X86Assembler::ConditionB | X86Assembler::ConditionBE) & DoubleConditionBits),
        DoubleConditionBits_should_not_interfere_with_X86Assembler_Condition_codes);
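
    // Explanatory note (an addition, not original text): branchDouble() below
    // consumes these extra bits rather than emitting them. DoubleConditionBitInvert
    // swaps the ucomisd operand order (e.g. DoubleLessThan reuses the 'above'
    // condition, since left < right is right > left when the comparison is
    // ordered), and DoubleConditionBitSpecial requests extra parity-flag
    // handling for the NaN cases; both bits are masked off before the jCC is
    // emitted.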

    static const RegisterID stackPointerRegister = X86Registers::esp;

#if ENABLE(JIT_CONSTANT_BLINDING)
    static bool shouldBlindForSpecificArch(uint32_t value) { return value >= 0x00ffffff; }
#if CPU(X86_64)
    static bool shouldBlindForSpecificArch(uint64_t value) { return value >= 0x00ffffff; }
#if OS(DARWIN) // On 64-bit systems other than DARWIN uint64_t and uintptr_t are the same type so overload is prohibited.
    static bool shouldBlindForSpecificArch(uintptr_t value) { return value >= 0x00ffffff; }
#endif
#endif
#endif
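
    // Note (an addition, not original text): the predicates above implement a
    // heuristic threshold - immediates below 0x00ffffff are treated as too
    // small to be worth blinding. For example:
    //
    //     shouldBlindForSpecificArch(0x1234u);     // false - small constant
    //     shouldBlindForSpecificArch(0xdeadbeefu); // true - blinding candidate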

    // Integer arithmetic operations:
    //
    // Operations are typically two operand - operation(source, srcDst)
    // For many operations the source may be a TrustedImm32, the srcDst operand
    // may often be a memory location (explicitly described using an Address
    // object).
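    //
    // Usage sketch (an illustration added here, not original text; 'masm' and
    // the register choices are assumptions). Computing eax = (eax + 42) & ebx:
    //
    //     masm.add32(TrustedImm32(42), X86Registers::eax);  // eax += 42
    //     masm.and32(X86Registers::ebx, X86Registers::eax); // eax &= ebx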

    void add32(RegisterID src, RegisterID dest)
    {
        m_assembler.addl_rr(src, dest);
    }

    void add32(TrustedImm32 imm, Address address)
    {
        m_assembler.addl_im(imm.m_value, address.offset, address.base);
    }

    void add32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.addl_ir(imm.m_value, dest);
    }

    void add32(Address src, RegisterID dest)
    {
        m_assembler.addl_mr(src.offset, src.base, dest);
    }

    void add32(RegisterID src, Address dest)
    {
        m_assembler.addl_rm(src, dest.offset, dest.base);
    }

    void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.leal_mr(imm.m_value, src, dest);
    }

    void and32(RegisterID src, RegisterID dest)
    {
        m_assembler.andl_rr(src, dest);
    }

    void and32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.andl_ir(imm.m_value, dest);
    }

    void and32(RegisterID src, Address dest)
    {
        m_assembler.andl_rm(src, dest.offset, dest.base);
    }

    void and32(Address src, RegisterID dest)
    {
        m_assembler.andl_mr(src.offset, src.base, dest);
    }

    void and32(TrustedImm32 imm, Address address)
    {
        m_assembler.andl_im(imm.m_value, address.offset, address.base);
    }

    void and32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2)
            zeroExtend32ToPtr(op1, dest);
        else if (op1 == dest)
            and32(op2, dest);
        else {
            move(op2, dest);
            and32(op1, dest);
        }
    }

    void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(src, dest);
        and32(imm, dest);
    }

    void lshift32(RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (shift_amount == X86Registers::ecx)
            m_assembler.shll_CLr(dest);
        else {
            // On x86 we can only shift by ecx; if asked to shift by another register we'll
            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
            // If dest is ecx, then shift the swapped register!
            swap(shift_amount, X86Registers::ecx);
            m_assembler.shll_CLr(dest == X86Registers::ecx ? shift_amount : dest);
            swap(shift_amount, X86Registers::ecx);
        }
    }
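
    // Caller-side sketch (an addition, not original text): because the
    // variable-count shift forms temporarily swap the count into ecx, the
    // shift amount must never alias dest (see the ASSERT above). For example:
    //
    //     masm.lshift32(X86Registers::ebx, X86Registers::edx); // edx <<= (ebx & 31)
    //
    // is fine, whereas lshift32(reg, reg) would trip the assertion.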

    void lshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (src != dest)
            move(src, dest);
        lshift32(shift_amount, dest);
    }

    void lshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.shll_i8r(imm.m_value, dest);
    }

    void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
        lshift32(imm, dest);
    }

    void mul32(RegisterID src, RegisterID dest)
    {
        m_assembler.imull_rr(src, dest);
    }

    void mul32(Address src, RegisterID dest)
    {
        m_assembler.imull_mr(src.offset, src.base, dest);
    }

    void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.imull_i32r(src, imm.m_value, dest);
    }

    void neg32(RegisterID srcDest)
    {
        m_assembler.negl_r(srcDest);
    }

    void neg32(Address srcDest)
    {
        m_assembler.negl_m(srcDest.offset, srcDest.base);
    }

    void or32(RegisterID src, RegisterID dest)
    {
        m_assembler.orl_rr(src, dest);
    }

    void or32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.orl_ir(imm.m_value, dest);
    }

    void or32(RegisterID src, Address dest)
    {
        m_assembler.orl_rm(src, dest.offset, dest.base);
    }

    void or32(Address src, RegisterID dest)
    {
        m_assembler.orl_mr(src.offset, src.base, dest);
    }

    void or32(TrustedImm32 imm, Address address)
    {
        m_assembler.orl_im(imm.m_value, address.offset, address.base);
    }

    void or32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2)
            zeroExtend32ToPtr(op1, dest);
        else if (op1 == dest)
            or32(op2, dest);
        else {
            move(op2, dest);
            or32(op1, dest);
        }
    }

    void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(src, dest);
        or32(imm, dest);
    }

    void rshift32(RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (shift_amount == X86Registers::ecx)
            m_assembler.sarl_CLr(dest);
        else {
            // On x86 we can only shift by ecx; if asked to shift by another register we'll
            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
            // If dest is ecx, then shift the swapped register!
            swap(shift_amount, X86Registers::ecx);
            m_assembler.sarl_CLr(dest == X86Registers::ecx ? shift_amount : dest);
            swap(shift_amount, X86Registers::ecx);
        }
    }

    void rshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (src != dest)
            move(src, dest);
        rshift32(shift_amount, dest);
    }

    void rshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.sarl_i8r(imm.m_value, dest);
    }

    void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
        rshift32(imm, dest);
    }

    void urshift32(RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (shift_amount == X86Registers::ecx)
            m_assembler.shrl_CLr(dest);
        else {
            // On x86 we can only shift by ecx; if asked to shift by another register we'll
            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
            // If dest is ecx, then shift the swapped register!
            swap(shift_amount, X86Registers::ecx);
            m_assembler.shrl_CLr(dest == X86Registers::ecx ? shift_amount : dest);
            swap(shift_amount, X86Registers::ecx);
        }
    }

    void urshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (src != dest)
            move(src, dest);
        urshift32(shift_amount, dest);
    }

    void urshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.shrl_i8r(imm.m_value, dest);
    }

    void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
        urshift32(imm, dest);
    }

    void sub32(RegisterID src, RegisterID dest)
    {
        m_assembler.subl_rr(src, dest);
    }

    void sub32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.subl_ir(imm.m_value, dest);
    }

    void sub32(TrustedImm32 imm, Address address)
    {
        m_assembler.subl_im(imm.m_value, address.offset, address.base);
    }

    void sub32(Address src, RegisterID dest)
    {
        m_assembler.subl_mr(src.offset, src.base, dest);
    }

    void sub32(RegisterID src, Address dest)
    {
        m_assembler.subl_rm(src, dest.offset, dest.base);
    }

    void xor32(RegisterID src, RegisterID dest)
    {
        m_assembler.xorl_rr(src, dest);
    }

    void xor32(TrustedImm32 imm, Address dest)
    {
        if (imm.m_value == -1)
            m_assembler.notl_m(dest.offset, dest.base);
        else
            m_assembler.xorl_im(imm.m_value, dest.offset, dest.base);
    }

    void xor32(TrustedImm32 imm, RegisterID dest)
    {
        if (imm.m_value == -1)
            m_assembler.notl_r(dest);
        else
            m_assembler.xorl_ir(imm.m_value, dest);
    }

    void xor32(RegisterID src, Address dest)
    {
        m_assembler.xorl_rm(src, dest.offset, dest.base);
    }

    void xor32(Address src, RegisterID dest)
    {
        m_assembler.xorl_mr(src.offset, src.base, dest);
    }

    void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2)
            move(TrustedImm32(0), dest);
        else if (op1 == dest)
            xor32(op2, dest);
        else {
            move(op2, dest);
            xor32(op1, dest);
        }
    }

    void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(src, dest);
        xor32(imm, dest);
    }

    void sqrtDouble(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.sqrtsd_rr(src, dst);
    }

    void absDouble(FPRegisterID src, FPRegisterID dst)
    {
        ASSERT(src != dst);
#if 0 // moved to JITStubEntries.cpp
        static const double negativeZeroConstant = -0.0;
#endif
        DEFINE_REMOTE_VAR_PTR(double, negativeZeroConstant);
        loadDouble(REMOTE_VAR_PTR(negativeZeroConstant), dst);
        m_assembler.andnpd_rr(src, dst);
    }

    void negateDouble(FPRegisterID src, FPRegisterID dst)
    {
        ASSERT(src != dst);
#if 0 // moved to JITStubEntries.cpp
        static const double negativeZeroConstant = -0.0;
#endif
        DEFINE_REMOTE_VAR_PTR(double, negativeZeroConstant);
        loadDouble(REMOTE_VAR_PTR(negativeZeroConstant), dst);
        m_assembler.xorpd_rr(src, dst);
    }

    // Memory access operations:
    //
    // Loads are of the form load(address, destination) and stores of the form
    // store(source, address). The source for a store may be a TrustedImm32. Address
    // operand objects to loads and stores will be implicitly constructed if a
    // register is passed.
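    //
    // Usage sketch (an illustration, not original text; registers and offsets
    // are assumptions). A field load followed by an indexed store:
    //
    //     masm.load32(Address(X86Registers::eax, 12), X86Registers::edx); // edx = [eax + 12]
    //     masm.store32(TrustedImm32(0),
    //         BaseIndex(X86Registers::eax, X86Registers::ecx, TimesFour)); // [eax + ecx * 4] = 0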

    void load32(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movl_mr(address.offset, address.base, dest);
    }

    void load32(BaseIndex address, RegisterID dest)
    {
        m_assembler.movl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
    {
        load32(address, dest);
    }

    void load16Unaligned(BaseIndex address, RegisterID dest)
    {
        load16(address, dest);
    }

    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movl_mr_disp32(address.offset, address.base, dest);
        return DataLabel32(this);
    }

    DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movl_mr_disp8(address.offset, address.base, dest);
        return DataLabelCompact(this);
    }

    static void repatchCompact(CodeLocationDataLabelCompact dataLabelCompact, int32_t value)
#if ENABLE(DETACHED_JIT) && !BUILDING_DETACHED_JIT
    ;
#else
    {
        ASSERT(isCompactPtrAlignedAddressOffset(value));
        AssemblerType_T::repatchCompact(dataLabelCompact.dataLocation(), value);
    }
#endif

    DataLabelCompact loadCompactWithAddressOffsetPatch(Address address, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movl_mr_disp8(address.offset, address.base, dest);
        return DataLabelCompact(this);
    }

    void load8(BaseIndex address, RegisterID dest)
    {
        m_assembler.movzbl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load8(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movzbl_mr(address.offset, address.base, dest);
    }

    void load8Signed(BaseIndex address, RegisterID dest)
    {
        m_assembler.movsbl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load8Signed(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movsbl_mr(address.offset, address.base, dest);
    }

    void load16(BaseIndex address, RegisterID dest)
    {
        m_assembler.movzwl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load16(Address address, RegisterID dest)
    {
        m_assembler.movzwl_mr(address.offset, address.base, dest);
    }

    void load16Signed(BaseIndex address, RegisterID dest)
    {
        m_assembler.movswl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load16Signed(Address address, RegisterID dest)
    {
        m_assembler.movswl_mr(address.offset, address.base, dest);
    }

    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
    {
        padBeforePatch();
        m_assembler.movl_rm_disp32(src, address.offset, address.base);
        return DataLabel32(this);
    }

    void store32(RegisterID src, ImplicitAddress address)
    {
        m_assembler.movl_rm(src, address.offset, address.base);
    }

    void store32(RegisterID src, BaseIndex address)
    {
        m_assembler.movl_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void store32(TrustedImm32 imm, ImplicitAddress address)
    {
        m_assembler.movl_i32m(imm.m_value, address.offset, address.base);
    }

    void store32(TrustedImm32 imm, BaseIndex address)
    {
        m_assembler.movl_i32m(imm.m_value, address.offset, address.base, address.index, address.scale);
    }

    void store8(TrustedImm32 imm, Address address)
    {
        ASSERT(-128 <= imm.m_value && imm.m_value < 128);
        m_assembler.movb_i8m(imm.m_value, address.offset, address.base);
    }

    void store8(TrustedImm32 imm, BaseIndex address)
    {
        ASSERT(-128 <= imm.m_value && imm.m_value < 128);
        m_assembler.movb_i8m(imm.m_value, address.offset, address.base, address.index, address.scale);
    }

    void store8(RegisterID src, BaseIndex address)
    {
#if CPU(X86)
        // On 32-bit x86 we can only store from the first 4 registers;
        // esp..edi are mapped to the 'h' registers!
        if (src >= 4) {
            // Pick a temporary register.
            RegisterID temp;
            if (address.base != X86Registers::eax && address.index != X86Registers::eax)
                temp = X86Registers::eax;
            else if (address.base != X86Registers::ebx && address.index != X86Registers::ebx)
                temp = X86Registers::ebx;
            else {
                ASSERT(address.base != X86Registers::ecx && address.index != X86Registers::ecx);
                temp = X86Registers::ecx;
            }

            // Swap to the temporary register to perform the store.
            swap(src, temp);
            m_assembler.movb_rm(temp, address.offset, address.base, address.index, address.scale);
            swap(src, temp);
            return;
        }
#endif
        m_assembler.movb_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void store16(RegisterID src, BaseIndex address)
    {
#if CPU(X86)
        // On 32-bit x86 we can only store from the first 4 registers;
        // esp..edi are mapped to the 'h' registers!
        if (src >= 4) {
            // Pick a temporary register.
            RegisterID temp;
            if (address.base != X86Registers::eax && address.index != X86Registers::eax)
                temp = X86Registers::eax;
            else if (address.base != X86Registers::ebx && address.index != X86Registers::ebx)
                temp = X86Registers::ebx;
            else {
                ASSERT(address.base != X86Registers::ecx && address.index != X86Registers::ecx);
                temp = X86Registers::ecx;
            }

            // Swap to the temporary register to perform the store.
            swap(src, temp);
            m_assembler.movw_rm(temp, address.offset, address.base, address.index, address.scale);
            swap(src, temp);
            return;
        }
#endif
        m_assembler.movw_rm(src, address.offset, address.base, address.index, address.scale);
    }

    // Floating-point operations:
    //
    // Presently only supports SSE, not x87 floating point.
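    //
    // Usage sketch (an illustration, not original text; registers and offsets
    // are assumptions). Summing two doubles in memory:
    //
    //     masm.loadDouble(Address(X86Registers::eax, 0), X86Registers::xmm0);  // xmm0 = [eax]
    //     masm.loadDouble(Address(X86Registers::eax, 8), X86Registers::xmm1);  // xmm1 = [eax + 8]
    //     masm.addDouble(X86Registers::xmm1, X86Registers::xmm0);              // xmm0 += xmm1
    //     masm.storeDouble(X86Registers::xmm0, Address(X86Registers::eax, 0)); // [eax] = xmm0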

    void moveDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        if (src != dest)
            m_assembler.movsd_rr(src, dest);
    }

    void loadDouble(const void* address, FPRegisterID dest)
    {
#if CPU(X86)
        ASSERT(isSSE2Present());
        m_assembler.movsd_mr(address, dest);
#else
        move(TrustedImmPtr(address), scratchRegister);
        loadDouble(scratchRegister, dest);
#endif
    }

    void loadDouble(ImplicitAddress address, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_mr(address.offset, address.base, dest);
    }

    void loadDouble(BaseIndex address, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void loadFloat(BaseIndex address, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.movss_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void storeDouble(FPRegisterID src, ImplicitAddress address)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_rm(src, address.offset, address.base);
    }

    void storeDouble(FPRegisterID src, BaseIndex address)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void storeFloat(FPRegisterID src, BaseIndex address)
    {
        ASSERT(isSSE2Present());
        m_assembler.movss_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void convertDoubleToFloat(FPRegisterID src, FPRegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtsd2ss_rr(src, dst);
    }

    void convertFloatToDouble(FPRegisterID src, FPRegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtss2sd_rr(src, dst);
    }

    void addDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.addsd_rr(src, dest);
    }

    void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        if (op1 == dest)
            addDouble(op2, dest);
        else {
            moveDouble(op2, dest);
            addDouble(op1, dest);
        }
    }

    void addDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.addsd_mr(src.offset, src.base, dest);
    }

    void divDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.divsd_rr(src, dest);
    }

    void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        // B := A / B is invalid.
        ASSERT(op1 == dest || op2 != dest);

        moveDouble(op1, dest);
        divDouble(op2, dest);
    }

    void divDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.divsd_mr(src.offset, src.base, dest);
    }

    void subDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.subsd_rr(src, dest);
    }

    void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        // B := A - B is invalid.
        ASSERT(op1 == dest || op2 != dest);

        moveDouble(op1, dest);
        subDouble(op2, dest);
    }

    void subDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.subsd_mr(src.offset, src.base, dest);
    }

    void mulDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.mulsd_rr(src, dest);
    }

    void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        if (op1 == dest)
            mulDouble(op2, dest);
        else {
            moveDouble(op2, dest);
            mulDouble(op1, dest);
        }
    }

    void mulDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.mulsd_mr(src.offset, src.base, dest);
    }

    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtsi2sd_rr(src, dest);
    }

    void convertInt32ToDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtsi2sd_mr(src.offset, src.base, dest);
    }

    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        ASSERT(isSSE2Present());

        if (cond & DoubleConditionBitInvert)
            m_assembler.ucomisd_rr(left, right);
        else
            m_assembler.ucomisd_rr(right, left);

        if (cond == DoubleEqual) {
            if (left == right)
                return Jump(m_assembler.jnp());
            Jump isUnordered(m_assembler.jp());
            Jump result = Jump(m_assembler.je());
            isUnordered.link(this);
            return result;
        } else if (cond == DoubleNotEqualOrUnordered) {
            if (left == right)
                return Jump(m_assembler.jp());
            Jump isUnordered(m_assembler.jp());
            Jump isEqual(m_assembler.je());
            isUnordered.link(this);
            Jump result = jump();
            isEqual.link(this);
            return result;
        }

        ASSERT(!(cond & DoubleConditionBitSpecial));
        return Jump(m_assembler.jCC(static_cast<X86Assembler::Condition>(cond & ~DoubleConditionBits)));
    }
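
    // Explanatory note (an addition, not original text): ucomisd raises the
    // parity flag for an unordered (NaN) comparison, and ZF is also set in
    // that case. DoubleEqual therefore guards its je with a jp so a NaN
    // operand falls through untaken, while DoubleNotEqualOrUnordered routes
    // the jp onto the taken path - exactly the two cases flagged with
    // DoubleConditionBitSpecial above.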

    // Truncates 'src' to an integer, and places the result in 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, INT_MIN).
    enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);
        return branch32(branchType ? NotEqual : Equal, dest, TrustedImm32(0x80000000));
    }
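
    // Explanatory sketch (an addition, not original text): for NaN or
    // out-of-range input, cvttsd2si writes the "integer indefinite" value
    // 0x80000000, so comparing dest against that sentinel catches every
    // failure at the cost of a false positive on a genuine INT_MIN, as the
    // comment above notes. A caller might collect the failures like so:
    //
    //     JumpList notAnInt32;
    //     notAnInt32.append(masm.branchTruncateDoubleToInt32(X86Registers::xmm0, X86Registers::eax));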

    Jump branchTruncateDoubleToUint32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);
        return branch32(branchType ? GreaterThanOrEqual : LessThan, dest, TrustedImm32(0));
    }

    void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);
    }

#if CPU(X86_64)
    void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2siq_rr(src, dest);
    }
#endif

    // Converts 'src' to an integer, and places the result in 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, 0).
    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp, bool negZeroCheck = true)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);

        // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
        if (negZeroCheck)
            failureCases.append(branchTest32(Zero, dest));

        // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
        convertInt32ToDouble(dest, fpTemp);
        m_assembler.ucomisd_rr(fpTemp, src);
        failureCases.append(m_assembler.jp());
        failureCases.append(m_assembler.jne());
    }

    Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch)
    {
        ASSERT(isSSE2Present());
        m_assembler.xorpd_rr(scratch, scratch);
        return branchDouble(DoubleNotEqual, reg, scratch);
    }

    Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID scratch)
    {
        ASSERT(isSSE2Present());
        m_assembler.xorpd_rr(scratch, scratch);
        return branchDouble(DoubleEqualOrUnordered, reg, scratch);
    }

    void lshiftPacked(TrustedImm32 imm, XMMRegisterID reg)
    {
        ASSERT(isSSE2Present());
        m_assembler.psllq_i8r(imm.m_value, reg);
    }

    void rshiftPacked(TrustedImm32 imm, XMMRegisterID reg)
    {
        ASSERT(isSSE2Present());
        m_assembler.psrlq_i8r(imm.m_value, reg);
    }

    void orPacked(XMMRegisterID src, XMMRegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.por_rr(src, dst);
    }

    void moveInt32ToPacked(RegisterID src, XMMRegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.movd_rr(src, dst);
    }

    void movePackedToInt32(XMMRegisterID src, RegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.movd_rr(src, dst);
    }

    // Stack manipulation operations:
    //
    // The ABI is assumed to provide a stack abstraction to memory,
    // containing machine word sized units of data. Push and pop
    // operations add and remove a single register sized unit of data
    // to or from the stack. Peek and poke operations read or write
    // values on the stack, without moving the current stack position.
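    //
    // Usage sketch (an illustration, not original text): preserving a
    // register around a clobbering sequence:
    //
    //     masm.push(X86Registers::esi); // [--esp] = esi
    //     // ... code that clobbers esi ...
    //     masm.pop(X86Registers::esi);  // esi = [esp++]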

    void pop(RegisterID dest)
    {
        m_assembler.pop_r(dest);
    }

    void push(RegisterID src)
    {
        m_assembler.push_r(src);
    }

    void push(Address address)
    {
        m_assembler.push_m(address.offset, address.base);
    }

    void push(TrustedImm32 imm)
    {
        m_assembler.push_i32(imm.m_value);
    }

    // Register move operations:
    //
    // Move values in registers.

    void move(TrustedImm32 imm, RegisterID dest)
    {
        // Note: on 64-bit the TrustedImm32 value is zero extended into the register; it
        // may be useful to have a separate version that sign extends the value?
        if (!imm.m_value)
            m_assembler.xorl_rr(dest, dest);
        else
            m_assembler.movl_i32r(imm.m_value, dest);
    }

#if CPU(X86_64)
    void move(RegisterID src, RegisterID dest)
    {
        // Note: on 64-bit this is a full register move; perhaps it would be
        // useful to have separate move32 & movePtr, with move32 zero extending?
        if (src != dest)
            m_assembler.movq_rr(src, dest);
    }

    void move(TrustedImmPtr imm, RegisterID dest)
    {
        m_assembler.movq_i64r(imm.asIntptr(), dest);
    }

    void move(TrustedImm64 imm, RegisterID dest)
    {
        m_assembler.movq_i64r(imm.m_value, dest);
    }

    void swap(RegisterID reg1, RegisterID reg2)
    {
        if (reg1 != reg2)
            m_assembler.xchgq_rr(reg1, reg2);
    }

    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.movsxd_rr(src, dest);
    }

    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.movl_rr(src, dest);
    }
#else
    void move(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            m_assembler.movl_rr(src, dest);
    }

    void move(TrustedImmPtr imm, RegisterID dest)
    {
        m_assembler.movl_i32r(imm.asIntptr(), dest);
    }

    void swap(RegisterID reg1, RegisterID reg2)
    {
        if (reg1 != reg2)
            m_assembler.xchgl_rr(reg1, reg2);
    }

    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        move(src, dest);
    }

    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        move(src, dest);
    }
#endif

    // Forwards / external control flow operations:
    //
    // This set of jump and conditional branch operations return a Jump
    // object which may be linked at a later point, allowing forward jumps,
    // or jumps that will require external linkage (after the code has been
    // relocated).
    //
    // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
    // respectively; for unsigned comparisons the names b, a, be, and ae are
    // used (representing the names 'below' and 'above').
    //
    // Operands to the comparison are provided in the expected order, e.g.
    // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
    // treated as a signed 32bit value, is less than or equal to 5.
    //
    // jz and jnz test whether the first operand is equal to zero, and take
    // an optional second operand of a mask under which to perform the test.
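    //
    // Usage sketch (an illustration, not original text; 'masm' and registers
    // are assumptions). Branch if eax > 10, then bind the target later:
    //
    //     Jump isBig = masm.branch32(GreaterThan, X86Registers::eax, TrustedImm32(10));
    //     // ... fallthrough path: eax <= 10 ...
    //     isBig.link(&masm); // code emitted after this point is the branch target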

public:
    Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        m_assembler.cmpb_im(right.m_value, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        m_assembler.cmpl_rr(right, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
            m_assembler.testl_rr(left, left);
        else
            m_assembler.cmpl_ir(right.m_value, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, Address right)
    {
        m_assembler.cmpl_mr(right.offset, right.base, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, Address left, RegisterID right)
    {
        m_assembler.cmpl_rm(right, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        m_assembler.cmpl_im(right.m_value, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        m_assembler.cmpl_im(right.m_value, left.offset, left.base, left.index, left.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        return branch32(cond, left, right);
    }

    Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        m_assembler.testl_rr(reg, mask);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        // If we are only interested in the low seven bits, this can be tested with a testb.
        if (mask.m_value == -1)
            m_assembler.testl_rr(reg, reg);
        else
            m_assembler.testl_i32r(mask.m_value, reg);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base, address.index, address.scale);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        // Byte in TrustedImm32 is not well defined, so be a little permissive here, but don't accept nonsense values.
        ASSERT(mask.m_value >= -128 && mask.m_value <= 255);
        if (mask.m_value == -1)
            m_assembler.cmpb_im(0, address.offset, address.base);
        else
            m_assembler.testb_im(mask.m_value, address.offset, address.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        // Byte in TrustedImm32 is not well defined, so be a little permissive here, but don't accept nonsense values.
        ASSERT(mask.m_value >= -128 && mask.m_value <= 255);
        if (mask.m_value == -1)
            m_assembler.cmpb_im(0, address.offset, address.base, address.index, address.scale);
        else
            m_assembler.testb_im(mask.m_value, address.offset, address.base, address.index, address.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        ASSERT(!(right.m_value & 0xFFFFFF00));
        m_assembler.cmpb_im(right.m_value, left.offset, left.base, left.index, left.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump jump()
    {
        return Jump(m_assembler.jmp());
    }

    void jump(RegisterID target)
    {
        m_assembler.jmp_r(target);
    }

    // Address is a memory location containing the address to jump to.
    void jump(Address address)
    {
        m_assembler.jmp_m(address.offset, address.base);
    }

    // Arithmetic control flow operations:
    //
    // This set of conditional branch operations branch based
    // on the result of an arithmetic operation. The operation
    // is performed as normal, storing the result.
    //
    // * jz operations branch if the result is zero.
    // * jo operations branch if the (signed) arithmetic
    //   operation caused an overflow to occur.
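    //
    // Usage sketch (an illustration, not original text): a checked increment
    // that diverts to a slow path on signed overflow:
    //
    //     Jump overflowed = masm.branchAdd32(Overflow, TrustedImm32(1), X86Registers::eax);
    //     // ... fast path continues with the incremented eax ...
    //     // 'overflowed' is linked to the slow path elsewhere.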

    Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        add32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, TrustedImm32 src, Address dest)
    {
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, RegisterID src, Address dest)
    {
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, Address src, RegisterID dest)
    {
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        if (src1 == dest)
            return branchAdd32(cond, src2, dest);
        move(src2, dest);
        return branchAdd32(cond, src1, dest);
    }

    Jump branchAdd32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        move(src, dest);
        return branchAdd32(cond, imm, dest);
    }

    Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        mul32(src, dest);
        if (cond != Overflow)
            m_assembler.testl_rr(dest, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul32(ResultCondition cond, Address src, RegisterID dest)
    {
        mul32(src, dest);
        if (cond != Overflow)
            m_assembler.testl_rr(dest, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        mul32(imm, src, dest);
        if (cond != Overflow)
            m_assembler.testl_rr(dest, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        if (src1 == dest)
            return branchMul32(cond, src2, dest);
        move(src2, dest);
        return branchMul32(cond, src1, dest);
    }

    Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        sub32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, Address dest)
    {
        sub32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, RegisterID src, Address dest)
    {
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, Address src, RegisterID dest)
    {
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        // B := A - B is invalid.
        ASSERT(src1 == dest || src2 != dest);

        move(src1, dest);
        return branchSub32(cond, src2, dest);
    }

    Jump branchSub32(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest)
    {
        move(src1, dest);
        return branchSub32(cond, src2, dest);
    }

    Jump branchNeg32(ResultCondition cond, RegisterID srcDest)
    {
        neg32(srcDest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        or32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    // Miscellaneous operations:

    void breakpoint()
    {
#if OS(ORBIS)
        m_assembler.int_imm(0x41);
#else
        m_assembler.int3();
#endif
    }

    Call nearCall()
    {
        return Call(m_assembler.call(), Call::LinkableNear);
    }

    Call call(RegisterID target)
    {
        return Call(m_assembler.call(target), Call::None);
    }

    void call(Address address)
    {
        m_assembler.call_m(address.offset, address.base);
    }

    void ret()
    {
        m_assembler.ret();
    }

    void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
    {
        m_assembler.cmpb_im(right.m_value, left.offset, left.base);
        set32(x86Condition(cond), dest);
    }

    void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmpl_rr(right, left);
        set32(x86Condition(cond), dest);
    }

    void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
            m_assembler.testl_rr(left, left);
        else
            m_assembler.cmpl_ir(right.m_value, left);
        set32(x86Condition(cond), dest);
    }

    // FIXME:
    // The mask should be optional... perhaps the argument order should be
    // dest-src, operations always have a dest? ... possibly not true, considering
    // asm ops like test, or pseudo ops like pop().

    void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        if (mask.m_value == -1)
            m_assembler.cmpb_im(0, address.offset, address.base);
        else
            m_assembler.testb_im(mask.m_value, address.offset, address.base);
        set32(x86Condition(cond), dest);
    }

    void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
        set32(x86Condition(cond), dest);
    }

    // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
    static RelationalCondition invert(RelationalCondition cond)
    {
        return static_cast<RelationalCondition>(cond ^ 1);
    }
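
    // Why cond ^ 1 works (an explanatory addition, not original text): x86
    // condition codes come in adjacent true/false pairs (ConditionE/ConditionNE,
    // ConditionL/ConditionGE, ...) differing only in the low bit, so flipping
    // that bit maps each relational condition onto its logical negation.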

    void nop()
    {
        m_assembler.nop();
    }

    static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
#if ENABLE(DETACHED_JIT) && !BUILDING_DETACHED_JIT
    ;
#else
    {
        X86Assembler::replaceWithJump(instructionStart.executableAddress(), destination.executableAddress());
    }
#endif

    static ptrdiff_t maxJumpReplacementSize()
    {
        return X86Assembler::maxJumpReplacementSize();
    }

protected:
    X86Assembler::Condition x86Condition(RelationalCondition cond)
    {
        return static_cast<X86Assembler::Condition>(cond);
    }

    X86Assembler::Condition x86Condition(ResultCondition cond)
    {
        return static_cast<X86Assembler::Condition>(cond);
    }

    void set32(X86Assembler::Condition cond, RegisterID dest)
    {
#if CPU(X86)
        // On 32-bit x86 we can only set the first 4 registers;
        // esp..edi are mapped to the 'h' registers!
        if (dest >= 4) {
            m_assembler.xchgl_rr(dest, X86Registers::eax);
            m_assembler.setCC_r(cond, X86Registers::eax);
            m_assembler.movzbl_rr(X86Registers::eax, X86Registers::eax);
            m_assembler.xchgl_rr(dest, X86Registers::eax);
            return;
        }
#endif
        m_assembler.setCC_r(cond, dest);
        m_assembler.movzbl_rr(dest, dest);
    }

private:
    // Only MacroAssemblerX86 should be using the following method; SSE2 is always available on
    // x86_64, and clients & subclasses of MacroAssembler should be using 'supportsFloatingPoint()'.
    friend class MacroAssemblerX86;

#if CPU(X86)
#if OS(MAC_OS_X)

    // All X86 Macs are guaranteed to support at least SSE2.
    static bool isSSE2Present()
    {
        return true;
    }

#else // OS(MAC_OS_X)

    enum SSE2CheckState {
        NotCheckedSSE2,
        HasSSE2,
        NoSSE2
    };

    static bool isSSE2Present()
    {
        if (s_sse2CheckState == NotCheckedSSE2) {
            // Default the flags value to zero; if the compiler is
            // not MSVC or GCC we will read this as SSE2 not present.
            int flags = 0;
#if COMPILER(MSVC)
            _asm {
                mov eax, 1 // cpuid function 1 gives us the standard feature set
                cpuid;
                mov flags, edx;
            }
#elif COMPILER(GCC)
            asm (
                "movl $0x1, %%eax;"
                "pushl %%ebx;"
                "cpuid;"
                "popl %%ebx;"
                "movl %%edx, %0;"
                : "=g" (flags)
                :
                : "%eax", "%ecx", "%edx"
            );
#endif
            static const int SSE2FeatureBit = 1 << 26;
            s_sse2CheckState = (flags & SSE2FeatureBit) ? HasSSE2 : NoSSE2;
        }
        // Only check once.
        ASSERT(s_sse2CheckState != NotCheckedSSE2);

        return s_sse2CheckState == HasSSE2;
    }

    static SSE2CheckState s_sse2CheckState;

#endif // OS(MAC_OS_X)
#elif !defined(NDEBUG) // CPU(X86)

    // On x86-64 we should never be checking for SSE2 in a non-debug build,
    // but debug builds define this method to keep the asserts above happy.
    static bool isSSE2Present()
    {
        return true;
    }

#endif
};

} // namespace JSC

#endif // ENABLE(ASSEMBLER)

#endif // MacroAssemblerX86Common_h