ThunkGenerators.cpp

/*
 * Copyright (C) 2010, 2012, 2013 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "ThunkGenerators.h"

#include "CodeBlock.h"
#include "Operations.h"
#include "SpecializedThunkJIT.h"
#include <wtf/InlineASM.h>
#include <wtf/StringPrintStream.h>
#include <wtf/text/StringImpl.h>

#if ENABLE(JIT)
#if !(ENABLE(DETACHED_JIT) && !BUILDING_DETACHED_JIT)

namespace JSC {
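
// Shared slow case for call thunks whose callee is not a JSFunction: fake up enough
// of a call frame (caller's scope chain, return PC, a null CodeBlock), call the
// passed-in "not a JS function" stub, then return into the caller's frame.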
static JSInterfaceJIT::Call generateSlowCaseFor(VM* vm, JSInterfaceJIT& jit)
{
    jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::regT2);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT2, JSInterfaceJIT::regT2);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT2, JSStack::ScopeChain);

    // Also initialize ReturnPC and CodeBlock, like a JS function would.
    jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3);
    jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT3, JSStack::ReturnPC);
    jit.emitPutImmediateToCallFrameHeader(0, JSStack::CodeBlock);

    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);
    jit.restoreArgumentReference();
    JSInterfaceJIT::Call callNotJSFunction = jit.call();
    jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::callFrameRegister);
    jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);
    jit.ret();

    return callNotJSFunction;
}
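
// Builds the call-link trampoline: when the callee is a JSFunction, finish the call
// frame and call the lazy-link stub, then jump to the entry point it returns; any
// other callee falls through to the shared not-a-JS-function slow case above.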
static MacroAssemblerCodeRef linkForGenerator(VM* vm, FunctionPtr lazyLink, FunctionPtr notJSFunction, const char* name)
{
    JSInterfaceJIT jit;

    JSInterfaceJIT::JumpList slowCase;

#if USE(JSVALUE64)
    slowCase.append(jit.emitJumpIfNotJSCell(JSInterfaceJIT::regT0));
    slowCase.append(jit.emitJumpIfNotType(JSInterfaceJIT::regT0, JSInterfaceJIT::regT1, JSFunctionType));
#else // USE(JSVALUE64)
    slowCase.append(jit.branch32(JSInterfaceJIT::NotEqual, JSInterfaceJIT::regT1, JSInterfaceJIT::TrustedImm32(JSValue::CellTag)));
    slowCase.append(jit.emitJumpIfNotType(JSInterfaceJIT::regT0, JSInterfaceJIT::regT1, JSFunctionType));
#endif // USE(JSVALUE64)

    // Finish canonical initialization before JS function call.
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT0, JSFunction::offsetOfScopeChain()), JSInterfaceJIT::regT1);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

    // Also initialize ReturnPC for use by lazy linking and exceptions.
    jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3);
    jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT3, JSStack::ReturnPC);

    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);
    jit.restoreArgumentReference();
    JSInterfaceJIT::Call callLazyLink = jit.call();
    jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);
    jit.jump(JSInterfaceJIT::regT0);

    slowCase.link(&jit);
    JSInterfaceJIT::Call callNotJSFunction = generateSlowCaseFor(vm, jit);

    LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
    patchBuffer.link(callLazyLink, lazyLink);
    patchBuffer.link(callNotJSFunction, notJSFunction);

    return FINALIZE_CODE(patchBuffer, ("link %s trampoline", name));
}

MacroAssemblerCodeRef linkCallGenerator(VM* vm)
{
    return linkForGenerator(vm, FunctionPtr(cti_vm_lazyLinkCall), FunctionPtr(cti_op_call_NotJSFunction), "call");
}

MacroAssemblerCodeRef linkConstructGenerator(VM* vm)
{
    return linkForGenerator(vm, FunctionPtr(cti_vm_lazyLinkConstruct), FunctionPtr(cti_op_construct_NotJSConstruct), "construct");
}

MacroAssemblerCodeRef linkClosureCallGenerator(VM* vm)
{
    return linkForGenerator(vm, FunctionPtr(cti_vm_lazyLinkClosureCall), FunctionPtr(cti_op_call_NotJSFunction), "closure call");
}
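
// Builds the virtual call trampoline used when a call site cannot be linked to one
// callee. A JSFunction callee is compiled on demand (a negative numParameters means
// it has no code for this specialization yet), then we jump to its arity-check entry
// point; non-JSFunction callees take the shared slow case.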
static MacroAssemblerCodeRef virtualForGenerator(VM* vm, FunctionPtr compile, FunctionPtr notJSFunction, const char* name, CodeSpecializationKind kind)
{
    JSInterfaceJIT jit;

    JSInterfaceJIT::JumpList slowCase;

#if USE(JSVALUE64)
    slowCase.append(jit.emitJumpIfNotJSCell(JSInterfaceJIT::regT0));
#else // USE(JSVALUE64)
    slowCase.append(jit.branch32(JSInterfaceJIT::NotEqual, JSInterfaceJIT::regT1, JSInterfaceJIT::TrustedImm32(JSValue::CellTag)));
#endif // USE(JSVALUE64)
    slowCase.append(jit.emitJumpIfNotType(JSInterfaceJIT::regT0, JSInterfaceJIT::regT1, JSFunctionType));

    // Finish canonical initialization before JS function call.
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT0, JSFunction::offsetOfScopeChain()), JSInterfaceJIT::regT1);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT0, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);
    JSInterfaceJIT::Jump hasCodeBlock1 = jit.branch32(JSInterfaceJIT::GreaterThanOrEqual, JSInterfaceJIT::Address(JSInterfaceJIT::regT2, FunctionExecutable::offsetOfNumParametersFor(kind)), JSInterfaceJIT::TrustedImm32(0));
    jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3);
    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);
    jit.restoreArgumentReference();
    JSInterfaceJIT::Call callCompile = jit.call();
    jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT0, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);

    hasCodeBlock1.link(&jit);
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, FunctionExecutable::offsetOfJITCodeWithArityCheckFor(kind)), JSInterfaceJIT::regT0);
    jit.jump(JSInterfaceJIT::regT0);

    slowCase.link(&jit);
    JSInterfaceJIT::Call callNotJSFunction = generateSlowCaseFor(vm, jit);

    LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
    patchBuffer.link(callCompile, compile);
    patchBuffer.link(callNotJSFunction, notJSFunction);

    return FINALIZE_CODE(patchBuffer, ("virtual %s trampoline", name));
}

MacroAssemblerCodeRef virtualCallGenerator(VM* vm)
{
    return virtualForGenerator(vm, FunctionPtr(cti_op_call_jitCompile), FunctionPtr(cti_op_call_NotJSFunction), "call", CodeForCall);
}

MacroAssemblerCodeRef virtualConstructGenerator(VM* vm)
{
    return virtualForGenerator(vm, FunctionPtr(cti_op_construct_jitCompile), FunctionPtr(cti_op_construct_NotJSConstruct), "construct", CodeForConstruct);
}
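
// Fast path for reading the length property of a string via get_by_id: verify the
// cell really is a JSString, load its length, and return it boxed as an int32.
// Any failed check tail-calls cti_op_get_by_id_string_fail instead.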
MacroAssemblerCodeRef stringLengthTrampolineGenerator(VM* vm)
{
    JSInterfaceJIT jit;

#if USE(JSVALUE64)
    // Check eax is a string
    JSInterfaceJIT::Jump failureCases1 = jit.emitJumpIfNotJSCell(JSInterfaceJIT::regT0);
    JSInterfaceJIT::Jump failureCases2 = jit.branchPtr(
        JSInterfaceJIT::NotEqual, JSInterfaceJIT::Address(
            JSInterfaceJIT::regT0, JSCell::structureOffset()),
        JSInterfaceJIT::TrustedImmPtr(vm->stringStructure.get()));

    // Checks out okay! - get the length from the Ustring.
    jit.load32(
        JSInterfaceJIT::Address(JSInterfaceJIT::regT0, JSString::offsetOfLength()),
        JSInterfaceJIT::regT0);

    JSInterfaceJIT::Jump failureCases3 = jit.branch32(
        JSInterfaceJIT::LessThan, JSInterfaceJIT::regT0, JSInterfaceJIT::TrustedImm32(0));

    // regT0 contains a 64 bit value (is positive, is zero extended) so we don't need sign extend here.
    jit.emitFastArithIntToImmNoCheck(JSInterfaceJIT::regT0, JSInterfaceJIT::regT0);

#else // USE(JSVALUE64)
    // regT0 holds payload, regT1 holds tag
    JSInterfaceJIT::Jump failureCases1 = jit.branch32(
        JSInterfaceJIT::NotEqual, JSInterfaceJIT::regT1,
        JSInterfaceJIT::TrustedImm32(JSValue::CellTag));
    JSInterfaceJIT::Jump failureCases2 = jit.branchPtr(
        JSInterfaceJIT::NotEqual,
        JSInterfaceJIT::Address(JSInterfaceJIT::regT0, JSCell::structureOffset()),
        JSInterfaceJIT::TrustedImmPtr(vm->stringStructure.get()));

    // Checks out okay! - get the length from the Ustring.
    jit.load32(
        JSInterfaceJIT::Address(JSInterfaceJIT::regT0, JSString::offsetOfLength()),
        JSInterfaceJIT::regT2);

    JSInterfaceJIT::Jump failureCases3 = jit.branch32(
        JSInterfaceJIT::Above, JSInterfaceJIT::regT2, JSInterfaceJIT::TrustedImm32(INT_MAX));
    jit.move(JSInterfaceJIT::regT2, JSInterfaceJIT::regT0);
    jit.move(JSInterfaceJIT::TrustedImm32(JSValue::Int32Tag), JSInterfaceJIT::regT1);
#endif // USE(JSVALUE64)

    jit.ret();

    JSInterfaceJIT::Call failureCases1Call = jit.makeTailRecursiveCall(failureCases1);
    JSInterfaceJIT::Call failureCases2Call = jit.makeTailRecursiveCall(failureCases2);
    JSInterfaceJIT::Call failureCases3Call = jit.makeTailRecursiveCall(failureCases3);

    LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);

    patchBuffer.link(failureCases1Call, FunctionPtr(cti_op_get_by_id_string_fail));
    patchBuffer.link(failureCases2Call, FunctionPtr(cti_op_get_by_id_string_fail));
    patchBuffer.link(failureCases3Call, FunctionPtr(cti_op_get_by_id_string_fail));

    return FINALIZE_CODE(patchBuffer, ("string length trampoline"));
}
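
// Builds the thunk that enters a host (native) function: set up the parts of the
// call frame a native callee expects (null CodeBlock, caller's scope chain, return
// PC), pass ExecState* in the first argument register of the platform ABI, call the
// NativeFunction stored in the callee's NativeExecutable, and finally check
// vm->exception, diverting to the throw trampoline if the call raised one.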
static MacroAssemblerCodeRef nativeForGenerator(VM* vm, CodeSpecializationKind kind)
{
    int executableOffsetToFunction = NativeExecutable::offsetOfNativeFunctionFor(kind);

    JSInterfaceJIT jit;

    jit.emitPutImmediateToCallFrameHeader(0, JSStack::CodeBlock);
    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);

#if CPU(X86)
    // Load caller frame's scope chain into this callframe so that whatever we call can
    // get to its global data.
    jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::regT0);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT0);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

    jit.peek(JSInterfaceJIT::regT1);
    jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ReturnPC);

    // Calling convention: f(ecx, edx, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);

    jit.subPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(void*)), JSInterfaceJIT::stackPointerRegister); // Align stack after call.

    // call the function
    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, JSInterfaceJIT::regT1);
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT1);
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
    jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, executableOffsetToFunction));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(void*)), JSInterfaceJIT::stackPointerRegister);

#elif CPU(X86_64)
    // Load caller frame's scope chain into this callframe so that whatever we call can
    // get to its global data.
    jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::regT0);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT0);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

    jit.peek(JSInterfaceJIT::regT1);
    jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ReturnPC);

#if !OS(WINDOWS)
    // Calling convention: f(edi, esi, edx, ecx, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::edi);

    jit.subPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister); // Align stack after call.

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::esi);
    jit.loadPtr(JSInterfaceJIT::Address(X86Registers::esi, JSFunction::offsetOfExecutable()), X86Registers::r9);
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
    jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#else
    // Calling convention: f(ecx, edx, r8, r9, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);

    // Leave space for the callee parameter home addresses and align the stack.
    jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t) + 16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::edx);
    jit.loadPtr(JSInterfaceJIT::Address(X86Registers::edx, JSFunction::offsetOfExecutable()), X86Registers::r9);
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
    jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t) + 16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#endif

#elif CPU(ARM)
    // Load caller frame's scope chain into this callframe so that whatever we call can
    // get to its global data.
    jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::regT2);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT2);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

    jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3); // Callee preserved
    jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT3, JSStack::ReturnPC);

    // Calling convention: f(r0 == regT0, r1 == regT1, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, ARMRegisters::r0);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, ARMRegisters::r1);
    jit.move(JSInterfaceJIT::regT2, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
    jit.loadPtr(JSInterfaceJIT::Address(ARMRegisters::r1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);
    jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, executableOffsetToFunction));

    jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);

#elif CPU(SH4)
    // Load caller frame's scope chain into this callframe so that whatever we call can
    // get to its global data.
    jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::regT2);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT2);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

    jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3); // Callee preserved
    jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT3, JSStack::ReturnPC);

    // Calling convention: f(r0 == regT4, r1 == regT5, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT4);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, JSInterfaceJIT::regT5);
    jit.move(JSInterfaceJIT::regT2, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT5, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);

    jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, executableOffsetToFunction), JSInterfaceJIT::regT0);
    jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);

#elif CPU(MIPS)
    // Load caller frame's scope chain into this callframe so that whatever we call can
    // get to its global data.
    jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::regT0);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT0);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

    jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3); // Callee preserved
    jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT3, JSStack::ReturnPC);

    // Calling convention: f(a0, a1, a2, a3);
    // Host function signature: f(ExecState*);

    // Allocate stack space for 16 bytes (8-byte aligned)
    // 16 bytes (unused) for 4 arguments
    jit.subPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);

    // Setup arg0
    jit.move(JSInterfaceJIT::callFrameRegister, MIPSRegisters::a0);

    // Call
    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, MIPSRegisters::a2);
    jit.loadPtr(JSInterfaceJIT::Address(MIPSRegisters::a2, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
    jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, executableOffsetToFunction));

    // Restore stack space
    jit.addPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);

    jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);
#else
#error "JIT not supported on this platform."
    UNUSED_PARAM(executableOffsetToFunction);
    breakpoint();
#endif

    // Check for an exception
#if USE(JSVALUE64)
    jit.load64(&(vm->exception), JSInterfaceJIT::regT2);
    JSInterfaceJIT::Jump exceptionHandler = jit.branchTest64(JSInterfaceJIT::NonZero, JSInterfaceJIT::regT2);
#else
    JSInterfaceJIT::Jump exceptionHandler = jit.branch32(
        JSInterfaceJIT::NotEqual,
        JSInterfaceJIT::AbsoluteAddress(reinterpret_cast<char*>(&vm->exception) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)),
        JSInterfaceJIT::TrustedImm32(JSValue::EmptyValueTag));
#endif

    // Return.
    jit.ret();

    // Handle an exception
    exceptionHandler.link(&jit);

    // Grab the return address.
    jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT1);

    jit.move(JSInterfaceJIT::TrustedImmPtr(&vm->exceptionLocation), JSInterfaceJIT::regT2);
    jit.storePtr(JSInterfaceJIT::regT1, JSInterfaceJIT::regT2);
    jit.poke(JSInterfaceJIT::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));

    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);

    // Set the return address.
    jit.move(JSInterfaceJIT::TrustedImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()), JSInterfaceJIT::regT1);
    jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT1);

    jit.ret();

    LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("native %s trampoline", toCString(kind).data()));
}

MacroAssemblerCodeRef nativeCallGenerator(VM* vm)
{
    return nativeForGenerator(vm, CodeForCall);
}

MacroAssemblerCodeRef nativeConstructGenerator(VM* vm)
{
    return nativeForGenerator(vm, CodeForConstruct);
}
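
// Loads the JSString 'this' argument and an int32 index argument, bailing out to the
// slow path for rope strings (null value pointer) or out-of-range indices, then loads
// the 8-bit or 16-bit character at that index into regT0.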
static void stringCharLoad(SpecializedThunkJIT& jit, VM* vm)
{
    // load string
    jit.loadJSStringArgument(*vm, SpecializedThunkJIT::ThisArgument, SpecializedThunkJIT::regT0);

    // Load string length to regT2, and start the process of loading the data pointer into regT0
    jit.load32(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::jsStringLengthOffset()), SpecializedThunkJIT::regT2);
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::jsStringValueOffset()), SpecializedThunkJIT::regT0);
    jit.appendFailure(jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0));

    // load index
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT1); // regT1 contains the index

    // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
    jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT2));

    // Load the character
    SpecializedThunkJIT::JumpList is16Bit;
    SpecializedThunkJIT::JumpList cont8Bit;
    // Load the string flags
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, StringImpl::flagsOffset()), SpecializedThunkJIT::regT2);
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, StringImpl::dataOffset()), SpecializedThunkJIT::regT0);
    is16Bit.append(jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT2, MacroAssembler::TrustedImm32(StringImpl::flagIs8Bit())));
    jit.load8(MacroAssembler::BaseIndex(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, MacroAssembler::TimesOne, 0), SpecializedThunkJIT::regT0);
    cont8Bit.append(jit.jump());
    is16Bit.link(&jit);
    jit.load16(MacroAssembler::BaseIndex(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, MacroAssembler::TimesTwo, 0), SpecializedThunkJIT::regT0);
    cont8Bit.link(&jit);
}
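
// Maps a character code below 0x100 to the corresponding single-character JSString in
// the VM's small-strings table; larger codes, or table entries that have not been
// materialized yet, fail over to the slow path.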
static void charToString(SpecializedThunkJIT& jit, VM* vm, MacroAssembler::RegisterID src, MacroAssembler::RegisterID dst, MacroAssembler::RegisterID scratch)
{
    jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, src, MacroAssembler::TrustedImm32(0x100)));
    jit.move(MacroAssembler::TrustedImmPtr(vm->smallStrings.singleCharacterStrings()), scratch);
    jit.loadPtr(MacroAssembler::BaseIndex(scratch, src, MacroAssembler::ScalePtr, 0), dst);
    jit.appendFailure(jit.branchTestPtr(MacroAssembler::Zero, dst));
}

MacroAssemblerCodeRef charCodeAtThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(1);
    stringCharLoad(jit, vm);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "charCodeAt");
}

MacroAssemblerCodeRef charAtThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(1);
    stringCharLoad(jit, vm);
    charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
    jit.returnJSCell(SpecializedThunkJIT::regT0);
    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "charAt");
}

MacroAssemblerCodeRef fromCharCodeThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(1);
    // load char code
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0);
    charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
    jit.returnJSCell(SpecializedThunkJIT::regT0);
    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "fromCharCode");
}

MacroAssemblerCodeRef sqrtThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(1);
    if (!jit.supportsFloatingPointSqrt())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));

    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "sqrt");
}
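
// Machinery for the Math thunks below. UnaryDoubleOpWrapper(function) names a
// MathThunk pointer per wrapped function; on supported platforms it points at a tiny
// assembly adapter (defined below) that forwards the incoming double to the C
// function, and elsewhere it is null so the thunk generators fall back to
// ctiNativeCall. jsRound implements Math.round's semantics: halfway cases round
// toward +infinity.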
#define UnaryDoubleOpWrapper(function) function##Wrapper
enum MathThunkCallingConvention { };
typedef MathThunkCallingConvention(*MathThunk)(MathThunkCallingConvention);

extern "C" {

double jsRound(double) REFERENCED_FROM_ASM;
double jsRound(double d)
{
    double integer = ceil(d);
    return integer - (integer - d > 0.5);
}

}
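
// Per-architecture adapters between the JIT's "double in, double out" convention and
// the C ABI: on x86-64 the adapter is just call + ret (argument and result already
// live in xmm0); on x86 it spills xmm0 to the stack for the cdecl call and moves the
// x87 result back into xmm0; on Thumb-2/iOS it shuffles d0 through r0/r1 around the
// call. Platforms without an adapter define the wrapper as null.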
#if CPU(X86_64) && COMPILER(GCC) && (PLATFORM(MAC) || OS(LINUX))

#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "call " GLOBAL_REFERENCE(function) "\n" \
        "ret\n" \
    );\
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(X86) && COMPILER(GCC) && (PLATFORM(MAC) || OS(LINUX))

#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "subl $8, %esp\n" \
        "movsd %xmm0, (%esp) \n" \
        "call " GLOBAL_REFERENCE(function) "\n" \
        "fstpl (%esp) \n" \
        "movsd (%esp), %xmm0 \n" \
        "addl $8, %esp\n" \
        "ret\n" \
    );\
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(ARM_THUMB2) && COMPILER(GCC) && PLATFORM(IOS)

#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".align 2\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        ".thumb\n" \
        ".thumb_func " THUMB_FUNC_PARAM(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "push {lr}\n" \
        "vmov r0, r1, d0\n" \
        "blx " GLOBAL_REFERENCE(function) "\n" \
        "vmov d0, r0, r1\n" \
        "pop {lr}\n" \
        "bx lr\n" \
    ); \
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#else

#define defineUnaryDoubleOpWrapper(function) \
    static MathThunk UnaryDoubleOpWrapper(function) = 0
#endif

defineUnaryDoubleOpWrapper(jsRound);
defineUnaryDoubleOpWrapper(exp);
defineUnaryDoubleOpWrapper(log);
defineUnaryDoubleOpWrapper(floor);
defineUnaryDoubleOpWrapper(ceil);

#if 0 // moved to JITStubEntries.cpp
static const double oneConstant = 1.0;
static const double negativeHalfConstant = -0.5;
static const double zeroConstant = 0.0;
static const double halfConstant = 0.5;
#endif
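
// The floor, ceil and round thunks below share one shape: an int32 argument is
// returned unchanged; otherwise the wrapped C function is called and the result comes
// back as an int32 when it fits, or as a double. floor and round also try a
// truncation fast path for non-negative doubles when the hardware supports it. The
// zeroConstant/halfConstant doubles they load were moved to JITStubEntries.cpp (see
// the #if 0 block above) and are referenced through REMOTE_VAR_PTR.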
MacroAssemblerCodeRef floorThunkGenerator(VM* vm)
{
    DEFINE_REMOTE_VAR_PTR(double, zeroConstant);

    SpecializedThunkJIT jit(1);
    MacroAssembler::Jump nonIntJump;
    if (!UnaryDoubleOpWrapper(floor) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    SpecializedThunkJIT::Jump intResult;
    SpecializedThunkJIT::JumpList doubleResult;
    if (jit.supportsFloatingPointTruncate()) {
        jit.loadDouble(REMOTE_VAR_PTR(zeroConstant), SpecializedThunkJIT::fpRegT1);
        doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        SpecializedThunkJIT::JumpList slowPath;
        // Handle the negative doubles in the slow path for now.
        slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0));
        intResult = jit.jump();
        slowPath.link(&jit);
    }
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(floor));
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    if (jit.supportsFloatingPointTruncate())
        intResult.link(&jit);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "floor");
}

MacroAssemblerCodeRef ceilThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(1);
    if (!UnaryDoubleOpWrapper(ceil) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(ceil));
    SpecializedThunkJIT::JumpList doubleResult;
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "ceil");
}

MacroAssemblerCodeRef roundThunkGenerator(VM* vm)
{
    DEFINE_REMOTE_VAR_PTR(double, zeroConstant);
    DEFINE_REMOTE_VAR_PTR(double, halfConstant);

    SpecializedThunkJIT jit(1);
    if (!UnaryDoubleOpWrapper(jsRound) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    SpecializedThunkJIT::Jump intResult;
    SpecializedThunkJIT::JumpList doubleResult;
    if (jit.supportsFloatingPointTruncate()) {
        jit.loadDouble(REMOTE_VAR_PTR(zeroConstant), SpecializedThunkJIT::fpRegT1);
        doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        SpecializedThunkJIT::JumpList slowPath;
        // Handle the negative doubles in the slow path for now.
        slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        jit.loadDouble(REMOTE_VAR_PTR(halfConstant), SpecializedThunkJIT::fpRegT1);
        jit.addDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
        slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0));
        intResult = jit.jump();
        slowPath.link(&jit);
    }
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(jsRound));
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    if (jit.supportsFloatingPointTruncate())
        intResult.link(&jit);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "round");
}

MacroAssemblerCodeRef expThunkGenerator(VM* vm)
{
    if (!UnaryDoubleOpWrapper(exp))
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    SpecializedThunkJIT jit(1);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(exp));
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "exp");
}

MacroAssemblerCodeRef logThunkGenerator(VM* vm)
{
    if (!UnaryDoubleOpWrapper(log))
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    SpecializedThunkJIT jit(1);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(log));
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "log");
}
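
// Math.abs: integer arguments use the branchless mask trick (mask = x >> 31;
// abs = (x + mask) ^ mask), bailing out for INT_MIN, whose absolute value does not
// fit in an int32; double arguments just use absDouble.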
MacroAssemblerCodeRef absThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(1);
    if (!jit.supportsFloatingPointAbs())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.rshift32(SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(31), SpecializedThunkJIT::regT1);
    jit.add32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.xor32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.appendFailure(jit.branch32(MacroAssembler::Equal, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(1 << 31)));
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    // Shame about the double int conversion here.
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.absDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
    jit.returnDouble(SpecializedThunkJIT::fpRegT1);
    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "abs");
}
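
// Math.pow: non-negative int32 exponents are handled with square-and-multiply in a
// loop; of the non-integer exponents, only x^-0.5 with x > 1 is handled inline, as
// 1 / sqrt(x). Everything else (including negative integer exponents) falls back to
// the slow path.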
MacroAssemblerCodeRef powThunkGenerator(VM* vm)
{
    DEFINE_REMOTE_VAR_PTR(double, oneConstant);
    DEFINE_REMOTE_VAR_PTR(double, negativeHalfConstant);

    SpecializedThunkJIT jit(2);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));

    jit.loadDouble(REMOTE_VAR_PTR(oneConstant), SpecializedThunkJIT::fpRegT1);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    MacroAssembler::Jump nonIntExponent;
    jit.loadInt32Argument(1, SpecializedThunkJIT::regT0, nonIntExponent);
    jit.appendFailure(jit.branch32(MacroAssembler::LessThan, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(0)));

    MacroAssembler::Jump exponentIsZero = jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0);
    MacroAssembler::Label startLoop(jit.label());

    MacroAssembler::Jump exponentIsEven = jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(1));
    jit.mulDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
    exponentIsEven.link(&jit);
    jit.mulDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    jit.rshift32(MacroAssembler::TrustedImm32(1), SpecializedThunkJIT::regT0);
    jit.branchTest32(MacroAssembler::NonZero, SpecializedThunkJIT::regT0).linkTo(startLoop, &jit);

    exponentIsZero.link(&jit);

    {
        SpecializedThunkJIT::JumpList doubleResult;
        jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT0);
        jit.returnInt32(SpecializedThunkJIT::regT0);
        doubleResult.link(&jit);
        jit.returnDouble(SpecializedThunkJIT::fpRegT1);
    }

    if (jit.supportsFloatingPointSqrt()) {
        nonIntExponent.link(&jit);
        jit.loadDouble(REMOTE_VAR_PTR(negativeHalfConstant), SpecializedThunkJIT::fpRegT3);
        jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::regT0);
        jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleLessThanOrEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::fpRegT3));
        jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
        jit.divDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);

        SpecializedThunkJIT::JumpList doubleResult;
        jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT0);
        jit.returnInt32(SpecializedThunkJIT::regT0);
        doubleResult.link(&jit);
        jit.returnDouble(SpecializedThunkJIT::fpRegT1);
    } else
        jit.appendFailure(nonIntExponent);

    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "pow");
}
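
// Math.imul: both arguments are converted to int32 (double arguments are truncated,
// and anything that cannot be truncated becomes 0), then multiplied with 32-bit
// wrap-around.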
MacroAssemblerCodeRef imulThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(2);
    MacroAssembler::Jump nonIntArg0Jump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntArg0Jump);
    SpecializedThunkJIT::Label doneLoadingArg0(&jit);
    MacroAssembler::Jump nonIntArg1Jump;
    jit.loadInt32Argument(1, SpecializedThunkJIT::regT1, nonIntArg1Jump);
    SpecializedThunkJIT::Label doneLoadingArg1(&jit);
    jit.mul32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.returnInt32(SpecializedThunkJIT::regT0);

    if (jit.supportsFloatingPointTruncate()) {
        nonIntArg0Jump.link(&jit);
        jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
        jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg0, &jit);
        jit.xor32(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0);
        jit.jump(doneLoadingArg0);
    } else
        jit.appendFailure(nonIntArg0Jump);

    if (jit.supportsFloatingPointTruncate()) {
        nonIntArg1Jump.link(&jit);
        jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1);
        jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg1, &jit);
        jit.xor32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT1);
        jit.jump(doneLoadingArg1);
    } else
        jit.appendFailure(nonIntArg1Jump);

    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "imul");
}

}

#endif // #if !(ENABLE(DETACHED_JIT) && !BUILDING_DETACHED_JIT)
#endif // ENABLE(JIT)