DFGJITCompiler.cpp

/*
 * Copyright (C) 2011, 2013 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
  25. #include "config.h"
  26. #include "DFGJITCompiler.h"
  27. #if ENABLE(DFG_JIT)
  28. #include "CodeBlock.h"
  29. #include "DFGOSRExitCompiler.h"
  30. #include "DFGOperations.h"
  31. #include "DFGRegisterBank.h"
  32. #include "DFGSlowPathGenerator.h"
  33. #include "DFGSpeculativeJIT.h"
  34. #include "DFGThunks.h"
  35. #include "JSCJSValueInlines.h"
  36. #include "VM.h"
  37. #include "LinkBuffer.h"
  38. #if ENABLE(DETACHED_JIT)
  39. #pragma message "[SECURE JSCORE] JITCompiler::m_disassembler not supported. see DFGJITCompiler.h for all references"
  40. #endif
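
// JITCompiler drives the DFG backend for a single compilation: compileEntry()
// emits the shared prologue, compileBody() hands code generation to the
// SpeculativeJIT, then slow paths, exception handlers, and OSR exits are
// generated, and link() wires the finished code into the CodeBlock's metadata
// (outgoing calls, inline-cache stub info, exception tables, OSR exit jumps).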

namespace JSC { namespace DFG {

JITCompiler::JITCompiler(Graph& dfg)
    : CCallHelpers(&dfg.m_vm, dfg.m_codeBlock)
    , m_graph(dfg)
    , m_currentCodeOriginIndex(0)
{
#if ENABLE(DETACHED_JIT)
#pragma message "[SECURE JSCORE] profiler not supported"
    RELEASE_ASSERT(!shouldShowDisassembly());
#else
    if (shouldShowDisassembly() || m_graph.m_vm.m_perBytecodeProfiler)
        m_disassembler = adoptPtr(new Disassembler(dfg));
#endif
}

#if !(ENABLE(DETACHED_JIT) && !BUILDING_DETACHED_JIT)

void JITCompiler::linkOSRExits()
{
    ASSERT(codeBlock()->numberOfOSRExits() == m_exitCompilationInfo.size());
#if ENABLE(DETACHED_JIT)
#pragma message "[SECURE JSCORE] profiler not supported"
#else
    if (m_graph.m_compilation) {
        for (unsigned i = 0; i < codeBlock()->numberOfOSRExits(); ++i) {
            OSRExit& exit = codeBlock()->osrExit(i);
            Vector<Label> labels;
            if (exit.m_watchpointIndex == std::numeric_limits<unsigned>::max()) {
                OSRExitCompilationInfo& info = m_exitCompilationInfo[i];
                for (unsigned j = 0; j < info.m_failureJumps.jumps().size(); ++j)
                    labels.append(info.m_failureJumps.jumps()[j].label());
            } else
                labels.append(codeBlock()->watchpoint(exit.m_watchpointIndex).sourceLabel());
            m_exitSiteLabels.append(labels);
        }
    }
#endif

    for (unsigned i = 0; i < codeBlock()->numberOfOSRExits(); ++i) {
        OSRExit& exit = codeBlock()->osrExit(i);
        JumpList& failureJumps = m_exitCompilationInfo[i].m_failureJumps;
        ASSERT(failureJumps.empty() == (exit.m_watchpointIndex != std::numeric_limits<unsigned>::max()));
        if (exit.m_watchpointIndex == std::numeric_limits<unsigned>::max())
            failureJumps.link(this);
        else
            codeBlock()->watchpoint(exit.m_watchpointIndex).setDestination(label());
        jitAssertHasValidCallFrame();
        store32(TrustedImm32(i), &vm()->osrExitIndex);
        exit.setPatchableCodeOffset(patchableJump());
    }
}
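
// How an exit fires, in outline: each exit site stores its index into
// vm->osrExitIndex and ends in a patchable jump. link() initially points that
// jump at the shared OSR exit generation thunk, which uses the index to
// compile the actual exit ramp lazily and repatches the jump so subsequent
// exits at this site branch straight to the generated ramp.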

void JITCompiler::compileEntry()
{
    // This code currently matches the old JIT. In the function header we need to
    // pop the return address (since we do not allow any recursion on the machine
    // stack), and perform a fast stack check.
    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56292
    // We'll need to convert the remaining cti_ style calls (specifically the stack
    // check) which will be dependent on stack layout. (We'd need to account for this in
    // both normal return code and when jumping to an exception handler).
    preserveReturnAddressAfterCall(GPRInfo::regT2);
    emitPutToCallFrameHeader(GPRInfo::regT2, JSStack::ReturnPC);
    emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);
}
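
// Conceptually the prologue does the following (pseudocode; on x86 the return
// address is popped off the machine stack, on ARM it comes from the link
// register):
//
//     regT2 = returnAddress
//     callFrame[JSStack::ReturnPC]  = regT2
//     callFrame[JSStack::CodeBlock] = m_codeBlock
//
// so the JS call frame, rather than the machine stack, carries the return
// linkage for this frame.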

void JITCompiler::compileBody(SpeculativeJIT& speculative)
{
    // We generate the speculative code path, followed by OSR exit code to return
    // to the old JIT code if speculations fail.

#if DFG_ENABLE(JIT_BREAK_ON_EVERY_FUNCTION)
    // Handy debug tool!
    breakpoint();
#endif

    bool compiledSpeculative = speculative.compile();
    ASSERT_UNUSED(compiledSpeculative, compiledSpeculative);
}

void JITCompiler::compileExceptionHandlers()
{
    // Iterate over the m_exceptionChecks vector, checking for jumps to link.
    bool didLinkExceptionCheck = false;
    for (unsigned i = 0; i < m_exceptionChecks.size(); ++i) {
        Jump& exceptionCheck = m_exceptionChecks[i].m_exceptionCheck;
        if (exceptionCheck.isSet()) {
            exceptionCheck.link(this);
            didLinkExceptionCheck = true;
        }
    }

    // If any exception checks were linked, generate code to look up a handler.
    if (didLinkExceptionCheck) {
        // lookupExceptionHandler is passed two arguments, exec (the CallFrame*), and
        // the index into the CodeBlock's callReturnIndexVector corresponding to the
        // call that threw the exception (this was set in nonPreservedNonReturnGPR, when
        // the exception check was planted).
        move(GPRInfo::nonPreservedNonReturnGPR, GPRInfo::argumentGPR1);
        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
#if CPU(X86)
        // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
        poke(GPRInfo::argumentGPR0);
        poke(GPRInfo::argumentGPR1, 1);
#endif
        m_calls.append(CallLinkRecord(call(), lookupExceptionHandler));
        // lookupExceptionHandler leaves the handler CallFrame* in the returnValueGPR,
        // and the address of the handler in returnValueGPR2.
        jump(GPRInfo::returnValueGPR2);
    }
}
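
// Note on the CPU(X86) block above: the 32-bit x86 C calling convention
// passes arguments on the stack, so the two argument values are also poked
// into the outgoing argument slots; on other targets the register moves
// alone are sufficient.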

void JITCompiler::link(LinkBuffer& linkBuffer)
{
    // Link the code, populate data in CodeBlock data structures.
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("JIT code for %p start at [%p, %p). Size = %zu.\n", m_codeBlock, linkBuffer.debugAddress(), static_cast<char*>(linkBuffer.debugAddress()) + linkBuffer.debugSize(), linkBuffer.debugSize());
#endif

    // Link all calls out from the JIT code to their respective functions.
    for (unsigned i = 0; i < m_calls.size(); ++i)
        linkBuffer.link(m_calls[i].m_call, m_calls[i].m_function);

    m_codeBlock->callReturnIndexVector().reserveCapacity(m_exceptionChecks.size());
    for (unsigned i = 0; i < m_exceptionChecks.size(); ++i) {
        unsigned returnAddressOffset = linkBuffer.returnAddressOffset(m_exceptionChecks[i].m_call);
        CodeOrigin codeOrigin = m_exceptionChecks[i].m_codeOrigin;
        while (codeOrigin.inlineCallFrame)
            codeOrigin = codeOrigin.inlineCallFrame->caller;
        unsigned exceptionInfo = codeOrigin.bytecodeIndex;
        m_codeBlock->callReturnIndexVector().append(CallReturnOffsetToBytecodeOffset(returnAddressOffset, exceptionInfo));
    }

    Vector_shared<CodeOriginAtCallReturnOffset, 0, UnsafeVectorOverflow>& codeOrigins = m_codeBlock->codeOrigins();
    codeOrigins.resize(m_exceptionChecks.size());
    for (unsigned i = 0; i < m_exceptionChecks.size(); ++i) {
        CallExceptionRecord& record = m_exceptionChecks[i];
        unsigned returnAddressOffset = linkBuffer.returnAddressOffset(m_exceptionChecks[i].m_call);
        codeOrigins[i].codeOrigin = record.m_codeOrigin;
        codeOrigins[i].callReturnOffset = returnAddressOffset;
    }

    m_codeBlock->setNumberOfStructureStubInfos(m_propertyAccesses.size());
    for (unsigned i = 0; i < m_propertyAccesses.size(); ++i) {
        StructureStubInfo& info = m_codeBlock->structureStubInfo(i);
        CodeLocationCall callReturnLocation = linkBuffer.locationOf(m_propertyAccesses[i].m_slowPathGenerator->call());
        info.codeOrigin = m_propertyAccesses[i].m_codeOrigin;
        info.callReturnLocation = callReturnLocation;
        info.patch.dfg.deltaCheckImmToCall = differenceBetweenCodePtr(linkBuffer.locationOf(m_propertyAccesses[i].m_structureImm), callReturnLocation);
        info.patch.dfg.deltaCallToStructCheck = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_structureCheck));
#if USE(JSVALUE64)
        info.patch.dfg.deltaCallToLoadOrStore = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_loadOrStore));
#else
        info.patch.dfg.deltaCallToTagLoadOrStore = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_tagLoadOrStore));
        info.patch.dfg.deltaCallToPayloadLoadOrStore = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_payloadLoadOrStore));
#endif
        info.patch.dfg.deltaCallToSlowCase = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_slowPathGenerator->label()));
        info.patch.dfg.deltaCallToDone = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_done));
        info.patch.dfg.deltaCallToStorageLoad = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_propertyStorageLoad));
        info.patch.dfg.baseGPR = m_propertyAccesses[i].m_baseGPR;
#if USE(JSVALUE64)
        info.patch.dfg.valueGPR = m_propertyAccesses[i].m_valueGPR;
#else
        info.patch.dfg.valueTagGPR = m_propertyAccesses[i].m_valueTagGPR;
        info.patch.dfg.valueGPR = m_propertyAccesses[i].m_valueGPR;
#endif
        m_propertyAccesses[i].m_usedRegisters.copyInfo(info.patch.dfg.usedRegisters);
        info.patch.dfg.registersFlushed = m_propertyAccesses[i].m_registerMode == PropertyAccessRecord::RegistersFlushed;
    }
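
    // The deltas recorded above are byte offsets measured from the slow-path
    // call's return address. Inline-cache repatching uses them to locate the
    // structure immediate, the load/store, and the done/slow-case labels
    // within the generated code without storing absolute pointers.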

    m_codeBlock->setNumberOfCallLinkInfos(m_jsCalls.size());
    for (unsigned i = 0; i < m_jsCalls.size(); ++i) {
        CallLinkInfo& info = m_codeBlock->callLinkInfo(i);
        info.callType = m_jsCalls[i].m_callType;
        info.isDFG = true;
        info.codeOrigin = m_jsCalls[i].m_codeOrigin;
        linkBuffer.link(m_jsCalls[i].m_slowCall, FunctionPtr((m_vm->getCTIStub(info.callType == CallLinkInfo::Construct ? linkConstructThunkGenerator : linkCallThunkGenerator)).code().executableAddress()));
        info.callReturnLocation = linkBuffer.locationOfNearCall(m_jsCalls[i].m_slowCall);
        info.hotPathBegin = linkBuffer.locationOf(m_jsCalls[i].m_targetToCheck);
        info.hotPathOther = linkBuffer.locationOfNearCall(m_jsCalls[i].m_fastCall);
        info.calleeGPR = static_cast<unsigned>(m_jsCalls[i].m_callee);
    }

    MacroAssemblerCodeRef osrExitThunk = vm()->getCTIStub(osrExitGenerationThunkGenerator);
    CodeLocationLabel target = CodeLocationLabel(osrExitThunk.code());
    for (unsigned i = 0; i < codeBlock()->numberOfOSRExits(); ++i) {
        OSRExit& exit = codeBlock()->osrExit(i);
        linkBuffer.link(exit.getPatchableCodeOffsetAsJump(), target);
        exit.correctJump(linkBuffer);
        if (exit.m_watchpointIndex != std::numeric_limits<unsigned>::max())
            codeBlock()->watchpoint(exit.m_watchpointIndex).correctLabels(linkBuffer);
    }

#if ENABLE(DETACHED_JIT)
#pragma message "[SECURE JSCORE] profiler not supported"
#else
    if (m_graph.m_compilation) {
        ASSERT(m_exitSiteLabels.size() == codeBlock()->numberOfOSRExits());
        for (unsigned i = 0; i < m_exitSiteLabels.size(); ++i) {
            Vector<Label>& labels = m_exitSiteLabels[i];
            Vector<const void*> addresses;
            for (unsigned j = 0; j < labels.size(); ++j)
                addresses.append(linkBuffer.locationOf(labels[j]).executableAddress());
            m_graph.m_compilation->addOSRExitSite(addresses);
        }
    } else
#endif
    {
        ASSERT(!m_exitSiteLabels.size());
    }

#if ENABLE(DETACHED_JIT)
#pragma message "[SECURE JSCORE] profiler not supported"
#else
    codeBlock()->saveCompilation(m_graph.m_compilation);
#endif
}
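
// JS call sites are linked lazily: each slow call initially targets the
// link-call (or link-construct) thunk, which resolves the callee on first use
// and then repatches the fast-path check and near call recorded in
// CallLinkInfo, so later calls take the hot path directly.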

bool JITCompiler::compile(JITCode& entry)
{
    SamplingRegion samplingRegion("DFG Backend");

    setStartOfCode();
    compileEntry();
    SpeculativeJIT speculative(*this);
    compileBody(speculative);
    setEndOfMainPath();

    // Generate slow path code.
    speculative.runSlowPathGenerators();

    compileExceptionHandlers();
    linkOSRExits();

    // Create OSR entry trampolines if necessary.
    speculative.createOSREntries();
    setEndOfCode();

    LinkBuffer linkBuffer(*m_vm, this, m_codeBlock, JITCompilationCanFail);
    if (linkBuffer.didFailToAllocate())
        return false;
    link(linkBuffer);
    speculative.linkOSREntries(linkBuffer);
    codeBlock()->shrinkToFit(CodeBlock::LateShrink);

#if ENABLE(DETACHED_JIT)
#pragma message "[SECURE JSCORE] profiler and disassembler not supported"
#else
    if (shouldShowDisassembly())
        m_disassembler->dump(linkBuffer);
    if (m_graph.m_compilation)
        m_disassembler->reportToProfiler(m_graph.m_compilation.get(), linkBuffer);
#endif

    entry = JITCode(
        linkBuffer.finalizeCodeWithoutDisassembly(),
        JITCode::DFGJIT);
    return true;
}
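
// compile() handles global and eval code; compileFunction() below adds the
// pieces a function entry needs: a stack check, an arity check, and a second
// entry point (entryWithArityCheck) for callers that cannot statically prove
// the argument count.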

bool JITCompiler::compileFunction(JITCode& entry, MacroAssemblerCodePtr& entryWithArityCheck)
{
    SamplingRegion samplingRegion("DFG Backend");

    setStartOfCode();
    compileEntry();

    // === Function header code generation ===

    // This is the main entry point, without performing an arity check.
    // If we needed to perform an arity check we will already have moved the return address,
    // so enter after this.
    Label fromArityCheck(this);
    // Plant a check that sufficient space is available in the JSStack.
    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56291
    addPtr(TrustedImm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
    Jump stackCheck = branchPtr(Below, AbsoluteAddress(m_vm->interpreter->stack().addressOfEnd()), GPRInfo::regT1);
    // Return here after stack check.
    Label fromStackCheck = label();

    // === Function body code generation ===
    SpeculativeJIT speculative(*this);
    compileBody(speculative);
    setEndOfMainPath();

    // === Function footer code generation ===
    //
    // Generate code to perform the slow stack check (if the fast one in
    // the function header fails), and generate the entry point with arity check.
    //
    // Generate the stack check; if the fast check in the function head fails,
    // we need to call out to a helper function to check whether more space is available.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
    stackCheck.link(this);
    move(stackPointerRegister, GPRInfo::argumentGPR0);
    poke(GPRInfo::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));

    CallBeginToken token;
    beginCall(CodeOrigin(0), token);
    Call callStackCheck = call();
    notifyCall(callStackCheck, CodeOrigin(0), token);
    jump(fromStackCheck);

    // The fast entry point into a function does not check the correct number of arguments
    // have been passed to the call (we only use the fast entry point where we can statically
    // determine the correct number of arguments have been passed, or have already checked).
    // In cases where an arity check is necessary, we enter here.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
    Label arityCheck = label();
    compileEntry();

    load32(AssemblyHelpers::payloadFor((VirtualRegister)JSStack::ArgumentCount), GPRInfo::regT1);
    branch32(AboveOrEqual, GPRInfo::regT1, TrustedImm32(m_codeBlock->numParameters())).linkTo(fromArityCheck, this);
    move(stackPointerRegister, GPRInfo::argumentGPR0);
    poke(GPRInfo::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
    beginCall(CodeOrigin(0), token);
    Call callArityCheck = call();
    notifyCall(callArityCheck, CodeOrigin(0), token);
    move(GPRInfo::regT0, GPRInfo::callFrameRegister);
    jump(fromArityCheck);

    // Generate slow path code.
    speculative.runSlowPathGenerators();

    compileExceptionHandlers();
    linkOSRExits();

    // Create OSR entry trampolines if necessary.
    speculative.createOSREntries();
    setEndOfCode();

    // === Link ===
    LinkBuffer linkBuffer(*m_vm, this, m_codeBlock, JITCompilationCanFail);
    if (linkBuffer.didFailToAllocate())
        return false;
    link(linkBuffer);
    speculative.linkOSREntries(linkBuffer);
    codeBlock()->shrinkToFit(CodeBlock::LateShrink);

    // FIXME: switch the stack check & arity check over to DFGOperation style calls, not JIT stubs.
    linkBuffer.link(callStackCheck, cti_stack_check);
    linkBuffer.link(callArityCheck, m_codeBlock->m_isConstructor ? cti_op_construct_arityCheck : cti_op_call_arityCheck);

#if ENABLE(DETACHED_JIT)
#pragma message "[SECURE JSCORE] profiler and disassembler not supported"
#else
    if (shouldShowDisassembly())
        m_disassembler->dump(linkBuffer);
    if (m_graph.m_compilation)
        m_disassembler->reportToProfiler(m_graph.m_compilation.get(), linkBuffer);
#endif

    entryWithArityCheck = linkBuffer.locationOf(arityCheck);
    entry = JITCode(
        linkBuffer.finalizeCodeWithoutDisassembly(),
        JITCode::DFGJIT);
    return true;
}
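
// Arity-check flow, in outline: the slow entry point reloads ArgumentCount
// from the frame header; if enough arguments were passed it branches straight
// to fromArityCheck, otherwise cti_op_call_arityCheck (or the construct
// variant) builds a fixed-up frame and returns its pointer in regT0, which
// becomes the new callFrameRegister before the function body is entered.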

#endif // #if !(ENABLE(DETACHED_JIT) && !BUILDING_DETACHED_JIT)

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)