/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef MacroAssemblerX86_h
#define MacroAssemblerX86_h

#if ENABLE(ASSEMBLER) && CPU(X86)

#include "MacroAssemblerX86Common.h"

namespace JSC {

class MacroAssemblerX86 : public MacroAssemblerX86Common {
public:
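    // On x86-32 a pointer is four bytes wide, so pointer-scaled indexing uses
    // a scale factor of four.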
    static const Scale ScalePtr = TimesFour;

    using MacroAssemblerX86Common::add32;
    using MacroAssemblerX86Common::and32;
    using MacroAssemblerX86Common::branchAdd32;
    using MacroAssemblerX86Common::branchSub32;
    using MacroAssemblerX86Common::sub32;
    using MacroAssemblerX86Common::or32;
    using MacroAssemblerX86Common::load32;
    using MacroAssemblerX86Common::store32;
    using MacroAssemblerX86Common::store8;
    using MacroAssemblerX86Common::branch32;
    using MacroAssemblerX86Common::call;
    using MacroAssemblerX86Common::jump;
    using MacroAssemblerX86Common::addDouble;
    using MacroAssemblerX86Common::loadDouble;
    using MacroAssemblerX86Common::storeDouble;
    using MacroAssemblerX86Common::convertInt32ToDouble;
    using MacroAssemblerX86Common::branchTest8;
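
    // Three-operand add: lea computes src + imm into dest in a single
    // instruction without clobbering src. Note that, unlike add, lea does not
    // update the flags.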
    void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.leal_mr(imm.m_value, src, dest);
    }

    void add32(TrustedImm32 imm, AbsoluteAddress address)
    {
        m_assembler.addl_im(imm.m_value, address.m_ptr);
    }

    void add32(AbsoluteAddress address, RegisterID dest)
    {
        m_assembler.addl_mr(address.m_ptr, dest);
    }
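
    // 64-bit add of a sign-extended 32-bit immediate to a memory location,
    // done in two 32-bit halves: addl adds the low word, then adcl adds the
    // high word plus the carry. imm.m_value >> 31 is the sign extension of
    // the immediate (0 or -1), i.e. the high 32 bits of the 64-bit value.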
    void add64(TrustedImm32 imm, AbsoluteAddress address)
    {
        m_assembler.addl_im(imm.m_value, address.m_ptr);
        m_assembler.adcl_im(imm.m_value >> 31, reinterpret_cast<const char*>(address.m_ptr) + sizeof(int32_t));
    }

    void and32(TrustedImm32 imm, AbsoluteAddress address)
    {
        m_assembler.andl_im(imm.m_value, address.m_ptr);
    }

    void or32(TrustedImm32 imm, AbsoluteAddress address)
    {
        m_assembler.orl_im(imm.m_value, address.m_ptr);
    }

    void or32(RegisterID reg, AbsoluteAddress address)
    {
        m_assembler.orl_rm(reg, address.m_ptr);
    }

    void sub32(TrustedImm32 imm, AbsoluteAddress address)
    {
        m_assembler.subl_im(imm.m_value, address.m_ptr);
    }

    void load32(const void* address, RegisterID dest)
    {
        m_assembler.movl_mr(address, dest);
    }
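
    // Emits the load and returns a label recording its location, so the
    // patching machinery can later rewrite the instruction in place.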
    ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
    {
        ConvertibleLoadLabel result = ConvertibleLoadLabel(this);
        m_assembler.movl_mr(address.offset, address.base, dest);
        return result;
    }

    void addDouble(AbsoluteAddress address, FPRegisterID dest)
    {
        m_assembler.addsd_mr(address.m_ptr, dest);
    }

    void storeDouble(FPRegisterID src, const void* address)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_rm(src, address);
    }

    void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest)
    {
        m_assembler.cvtsi2sd_mr(src.m_ptr, dest);
    }

    void store32(TrustedImm32 imm, void* address)
    {
        m_assembler.movl_i32m(imm.m_value, address);
    }

    void store32(RegisterID src, void* address)
    {
        m_assembler.movl_rm(src, address);
    }

    void store8(TrustedImm32 imm, void* address)
    {
        ASSERT(-128 <= imm.m_value && imm.m_value < 128);
        m_assembler.movb_i8m(imm.m_value, address);
    }

    // Possibly clobbers src.
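    // The low 32 bits of the double are moved to dest1, then the packed value
    // is shifted right by 32 so the high 32 bits can be moved to dest2 (this
    // shift is why src may be clobbered).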
    void moveDoubleToInts(FPRegisterID src, RegisterID dest1, RegisterID dest2)
    {
        movePackedToInt32(src, dest1);
        rshiftPacked(TrustedImm32(32), src);
        movePackedToInt32(src, dest2);
    }
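
    // The inverse operation: src1 supplies the low 32 bits of the double and
    // src2 the high 32 bits; the halves are combined in the XMM registers
    // with a 32-bit packed shift and a packed OR.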
    void moveIntsToDouble(RegisterID src1, RegisterID src2, FPRegisterID dest, FPRegisterID scratch)
    {
        moveInt32ToPacked(src1, dest);
        moveInt32ToPacked(src2, scratch);
        lshiftPacked(TrustedImm32(32), scratch);
        orPacked(scratch, dest);
    }
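
    // The branch variants below perform the arithmetic or comparison directly
    // on the memory operand and then branch on the resulting flags.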
    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress dest)
    {
        m_assembler.addl_im(imm.m_value, dest.m_ptr);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress dest)
    {
        m_assembler.subl_im(imm.m_value, dest.m_ptr);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
    {
        m_assembler.cmpl_rm(right, left.m_ptr);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
    {
        m_assembler.cmpl_im(right.m_value, left.m_ptr);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }
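
    // Emits a call with a placeholder target; Call::Linkable marks it so the
    // real destination can be filled in by linkCall() below.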
    Call call()
    {
        return Call(m_assembler.call(), Call::Linkable);
    }

    // Address is a memory location containing the address to jump to.
    void jump(AbsoluteAddress address)
    {
        m_assembler.jmp_m(address.m_ptr);
    }
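
    // A tail-recursive call is emitted as a plain jump, then wrapped as a
    // Call so it can be linked and repatched like one.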
    Call tailRecursiveCall()
    {
        return Call::fromTailJump(jump());
    }

    Call makeTailRecursiveCall(Jump oldJump)
    {
        return Call::fromTailJump(oldJump);
    }
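
    // Emits a mov of a 32-bit immediate and returns a label marking it so the
    // immediate can be repatched later; padBeforePatch() gives the assembler a
    // chance to pad so the patchable location is laid out safely.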
    DataLabelPtr moveWithPatch(TrustedImmPtr initialValue, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movl_i32r(initialValue.asIntptr(), dest);
        return DataLabelPtr(this);
    }
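
    // A mask of -1 tests every bit of the byte, so a compare against zero
    // sets the same flags as the test and is used instead.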
    Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        ASSERT(mask.m_value >= -128 && mask.m_value <= 255);
        if (mask.m_value == -1)
            m_assembler.cmpb_im(0, address.m_ptr);
        else
            m_assembler.testb_im(mask.m_value, address.m_ptr);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }
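
    // The _force32 compare forms always encode a full 4-byte immediate, even
    // when the initial value would fit in a single byte, so that any pointer
    // value can be patched in later.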
    Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        padBeforePatch();
        m_assembler.cmpl_ir_force32(initialRightValue.asIntptr(), left);
        dataLabel = DataLabelPtr(this);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        padBeforePatch();
        m_assembler.cmpl_im_force32(initialRightValue.asIntptr(), left.offset, left.base);
        dataLabel = DataLabelPtr(this);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
    {
        padBeforePatch();
        m_assembler.movl_i32m(initialValue.asIntptr(), address.offset, address.base);
        return DataLabelPtr(this);
    }

    static bool supportsFloatingPoint() { return isSSE2Present(); }
    // See comment on MacroAssemblerARMv7::supportsFloatingPointTruncate()
    static bool supportsFloatingPointTruncate() { return isSSE2Present(); }
    static bool supportsFloatingPointSqrt() { return isSSE2Present(); }
    static bool supportsFloatingPointAbs() { return isSSE2Present(); }
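
    // A near call on x86 encodes a 32-bit displacement relative to the next
    // instruction; the target is recovered by reading the displacement that
    // precedes the data location and adding it to that location's address.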
    static FunctionPtr readCallTarget(CodeLocationCall call)
    {
        intptr_t offset = reinterpret_cast<int32_t*>(call.dataLocation())[-1];
        return FunctionPtr(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(call.dataLocation()) + offset));
    }

    static bool canJumpReplacePatchableBranchPtrWithPatch() { return true; }
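
    // The cmpl emitted by branchPtrWithPatch is opcode + ModRM + imm32 = 6
    // bytes; backing up that many bytes from the data label gives the start
    // of the instruction, which must be at least as large as the jump that
    // may replace it.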
    static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
    {
        const int opcodeBytes = 1;
        const int modRMBytes = 1;
        const int immediateBytes = 4;
        const int totalBytes = opcodeBytes + modRMBytes + immediateBytes;
        ASSERT(totalBytes >= maxJumpReplacementSize());
        return label.labelAtOffset(-totalBytes);
    }

    static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr label)
    {
        const int opcodeBytes = 1;
        const int modRMBytes = 1;
        const int offsetBytes = 0;
        const int immediateBytes = 4;
        const int totalBytes = opcodeBytes + modRMBytes + offsetBytes + immediateBytes;
        ASSERT(totalBytes >= maxJumpReplacementSize());
        return label.labelAtOffset(-totalBytes);
    }

    static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID reg, void* initialValue)
    {
        X86Assembler::revertJumpTo_cmpl_ir_force32(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), reg);
    }

    static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel instructionStart, Address address, void* initialValue)
    {
        ASSERT(!address.offset);
        X86Assembler::revertJumpTo_cmpl_im_force32(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), 0, address.base);
    }

private:
    friend class LinkBuffer;
    friend class RepatchBuffer;

    static void linkCall(void* code, Call call, FunctionPtr function)
    {
        X86Assembler::linkCall(code, call.m_label, function.value());
    }

    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
    {
        X86Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
    }

    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
    {
        X86Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
    }
};

} // namespace JSC

#endif // ENABLE(ASSEMBLER) && CPU(X86)

#endif // MacroAssemblerX86_h