DFGOSRExitCompiler64.cpp
/*
 * Copyright (C) 2011, 2013 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"
#include "DFGOSRExitCompiler.h"

#if ENABLE(DFG_JIT) && USE(JSVALUE64)

#include "DFGOperations.h"
#include "Operations.h"
#include <wtf/DataLog.h>

namespace JSC { namespace DFG {

void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecovery>& operands, SpeculationRecovery* recovery)
{
    // 1) Pro-forma stuff.
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("OSR exit for (");
    for (CodeOrigin codeOrigin = exit.m_codeOrigin; ; codeOrigin = codeOrigin.inlineCallFrame->caller) {
        dataLogF("bc#%u", codeOrigin.bytecodeIndex);
        if (!codeOrigin.inlineCallFrame)
            break;
        dataLogF(" -> %p ", codeOrigin.inlineCallFrame->executable.get());
    }
    dataLogF(") ");
    dumpOperands(operands, WTF::dataFile());
#endif

    if (Options::printEachOSRExit()) {
        SpeculationFailureDebugInfo* debugInfo = new SpeculationFailureDebugInfo;
        debugInfo->codeBlock = m_jit.codeBlock();
        m_jit.debugCall(debugOperationPrintSpeculationFailure, debugInfo);
    }

#if DFG_ENABLE(JIT_BREAK_ON_SPECULATION_FAILURE)
    m_jit.breakpoint();
#endif

#if DFG_ENABLE(SUCCESS_STATS)
    static SamplingCounter counter("SpeculationFailure");
    m_jit.emitCount(counter);
#endif

    // 2) Perform speculation recovery. This only comes into play when an operation
    //    starts mutating state before verifying the speculation it has already made.
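    // Illustrative example (editorial note, not from the original source): for a
    // speculative integer add, the DFG may already have computed dest += src before
    // the overflow/speculation check fired. The sub32 below undoes that addition,
    // and the or64 with tagTypeNumberRegister (pinned to TagTypeNumber,
    // 0xFFFF000000000000 in the JSVALUE64 encoding) re-boxes the restored value as
    // an int32 JSValue.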
    GPRReg alreadyBoxed = InvalidGPRReg;

    if (recovery) {
        switch (recovery->type()) {
        case SpeculativeAdd:
            m_jit.sub32(recovery->src(), recovery->dest());
            m_jit.or64(GPRInfo::tagTypeNumberRegister, recovery->dest());
            alreadyBoxed = recovery->dest();
            break;

        case BooleanSpeculationCheck:
            m_jit.xor64(AssemblyHelpers::TrustedImm32(static_cast<int32_t>(ValueFalse)), recovery->dest());
            break;

        default:
            break;
        }
    }

    // 3) Refine some array and/or value profile, if appropriate.

    if (!!exit.m_jsValueSource) {
        if (exit.m_kind == BadCache || exit.m_kind == BadIndexingType) {
            // If the instruction that this originated from has an array profile, then
            // refine it. If it doesn't, then do nothing. The latter could happen for
            // hoisted checks, or checks emitted for operations that didn't have array
            // profiling - either ops that aren't array accesses at all, or weren't
            // known to be array accesses in the bytecode. The latter case is a FIXME
            // while the former case is an outcome of a CheckStructure not knowing why
            // it was emitted (could be either due to an inline cache of a property
            // access, or due to an array profile).
            CodeOrigin codeOrigin = exit.m_codeOriginForExitProfile;
            if (ArrayProfile* arrayProfile = m_jit.baselineCodeBlockFor(codeOrigin)->getArrayProfile(codeOrigin.bytecodeIndex)) {
                GPRReg usedRegister;
                if (exit.m_jsValueSource.isAddress())
                    usedRegister = exit.m_jsValueSource.base();
                else
                    usedRegister = exit.m_jsValueSource.gpr();

                GPRReg scratch1;
                GPRReg scratch2;
                scratch1 = AssemblyHelpers::selectScratchGPR(usedRegister);
                scratch2 = AssemblyHelpers::selectScratchGPR(usedRegister, scratch1);

                m_jit.push(scratch1);
                m_jit.push(scratch2);

                GPRReg value;
                if (exit.m_jsValueSource.isAddress()) {
                    value = scratch1;
                    m_jit.loadPtr(AssemblyHelpers::Address(exit.m_jsValueSource.asAddress()), value);
                } else
                    value = exit.m_jsValueSource.gpr();

                m_jit.loadPtr(AssemblyHelpers::Address(value, JSCell::structureOffset()), scratch1);
                m_jit.storePtr(scratch1, arrayProfile->addressOfLastSeenStructure());
                m_jit.load8(AssemblyHelpers::Address(scratch1, Structure::indexingTypeOffset()), scratch1);
                m_jit.move(AssemblyHelpers::TrustedImm32(1), scratch2);
                m_jit.lshift32(scratch1, scratch2);
                m_jit.or32(scratch2, AssemblyHelpers::AbsoluteAddress(arrayProfile->addressOfArrayModes()));
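                // Editorial note: the array-modes word is, roughly, a bitmask with one
                // bit per indexing type, so or'ing in the (1 << indexingType) value
                // computed in scratch2 records that this indexing type has now been
                // observed at this array-profiled site.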
                m_jit.pop(scratch2);
                m_jit.pop(scratch1);
            }
        }

        if (!!exit.m_valueProfile) {
            EncodedJSValue* bucket = exit.m_valueProfile.getSpecFailBucket(0);

            if (exit.m_jsValueSource.isAddress()) {
                // We can't be sure that we have a spare register. So use the tagTypeNumberRegister,
                // since we know how to restore it.
                m_jit.load64(AssemblyHelpers::Address(exit.m_jsValueSource.asAddress()), GPRInfo::tagTypeNumberRegister);
                m_jit.store64(GPRInfo::tagTypeNumberRegister, bucket);
                m_jit.move(AssemblyHelpers::TrustedImm64(TagTypeNumber), GPRInfo::tagTypeNumberRegister);
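                // tagTypeNumberRegister is kept pinned to the constant TagTypeNumber
                // while JIT code runs, which is why it is safe to clobber here: it can
                // be restored exactly by re-materializing that constant, as the move
                // above does.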
            } else
                m_jit.store64(exit.m_jsValueSource.gpr(), bucket);
        }
    }

    // 4) Figure out how many scratch slots we'll need. We need one for every GPR/FPR
    //    whose destination is now occupied by a DFG virtual register, and we need
    //    one for every displaced virtual register if there are more than
    //    GPRInfo::numberOfRegisters of them. Also see if there are any constants,
    //    any undefined slots, any FPR slots, and any unboxed ints.

    Vector<bool> poisonedVirtualRegisters(operands.numberOfLocals());
    for (unsigned i = 0; i < poisonedVirtualRegisters.size(); ++i)
        poisonedVirtualRegisters[i] = false;

    unsigned numberOfPoisonedVirtualRegisters = 0;
    unsigned numberOfDisplacedVirtualRegisters = 0;

    // Booleans for fast checks. We expect that most OSR exits do not have to rebox
    // Int32s, have no FPRs, and have no constants. If there are constants, we
    // expect most of them to be jsUndefined(); if that's true then we handle that
    // specially to minimize code size and execution time.
    bool haveUnboxedInt32s = false;
    bool haveUnboxedDoubles = false;
    bool haveFPRs = false;
    bool haveConstants = false;
    bool haveUndefined = false;
    bool haveUInt32s = false;
    bool haveArguments = false;

    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];
        switch (recovery.technique()) {
        case Int32DisplacedInJSStack:
        case DoubleDisplacedInJSStack:
        case DisplacedInJSStack:
            numberOfDisplacedVirtualRegisters++;
            ASSERT((int)recovery.virtualRegister() >= 0);

            // See if we might like to store to this virtual register before doing
            // virtual register shuffling. If so, we say that the virtual register
            // is poisoned: it cannot be stored to until after displaced virtual
            // registers are handled. We track poisoned virtual registers carefully
            // to ensure this happens efficiently. Note that we expect this case
            // to be rare, so the handling of it is optimized for the cases in
            // which it does not happen.
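            // Illustrative example (editorial addition): suppose local 5's new value
            // is currently live in a GPR, but local 5's old stack slot is the source
            // for some other local's DisplacedInJSStack recovery. Writing the GPR
            // straight into local 5's slot would clobber the displaced value before it
            // is read, so local 5 is "poisoned": its GPR is parked in scratch storage
            // and only written to the stack after the displaced shuffle has completed.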
            if (recovery.virtualRegister() < (int)operands.numberOfLocals()) {
                switch (operands.local(recovery.virtualRegister()).technique()) {
                case InGPR:
                case UnboxedInt32InGPR:
                case UInt32InGPR:
                case InFPR:
                    if (!poisonedVirtualRegisters[recovery.virtualRegister()]) {
                        poisonedVirtualRegisters[recovery.virtualRegister()] = true;
                        numberOfPoisonedVirtualRegisters++;
                    }
                    break;
                default:
                    break;
                }
            }
            break;

        case UnboxedInt32InGPR:
        case AlreadyInJSStackAsUnboxedInt32:
            haveUnboxedInt32s = true;
            break;

        case AlreadyInJSStackAsUnboxedDouble:
            haveUnboxedDoubles = true;
            break;

        case UInt32InGPR:
            haveUInt32s = true;
            break;

        case InFPR:
            haveFPRs = true;
            break;

        case Constant:
            haveConstants = true;
            if (recovery.constant().isUndefined())
                haveUndefined = true;
            break;

        case ArgumentsThatWereNotCreated:
            haveArguments = true;
            break;

        default:
            break;
        }
    }

#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF(" ");
    if (numberOfPoisonedVirtualRegisters)
        dataLogF("Poisoned=%u ", numberOfPoisonedVirtualRegisters);
    if (numberOfDisplacedVirtualRegisters)
        dataLogF("Displaced=%u ", numberOfDisplacedVirtualRegisters);
    if (haveUnboxedInt32s)
        dataLogF("UnboxedInt32 ");
    if (haveUnboxedDoubles)
        dataLogF("UnboxedDoubles ");
    if (haveUInt32s)
        dataLogF("UInt32 ");
    if (haveFPRs)
        dataLogF("FPR ");
    if (haveConstants)
        dataLogF("Constants ");
    if (haveUndefined)
        dataLogF("Undefined ");
    dataLogF(" ");
#endif

    ScratchBuffer* scratchBuffer = m_jit.vm()->scratchBufferForSize(sizeof(EncodedJSValue) * std::max(haveUInt32s ? 2u : 0u, numberOfPoisonedVirtualRegisters + (numberOfDisplacedVirtualRegisters <= GPRInfo::numberOfRegisters ? 0 : numberOfDisplacedVirtualRegisters)));
    EncodedJSValue* scratchDataBuffer = scratchBuffer ? static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : 0;
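    // Sizing note (editorial): the scratch buffer needs at least two slots when the
    // UInt32InGPR path below runs (one to save a GPR, one to spill fpRegT0), one slot
    // per poisoned virtual register, and one slot per displaced virtual register only
    // when there are more displaced registers than GPRs (the in-register shuffle is
    // used otherwise) - hence the std::max of those two requirements above.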

    // From here on, the code assumes that it is profitable to maximize the distance
    // between when something is computed and when it is stored.

    // 5) Perform all reboxing of integers.

    if (haveUnboxedInt32s || haveUInt32s) {
        for (size_t index = 0; index < operands.size(); ++index) {
            const ValueRecovery& recovery = operands[index];
            switch (recovery.technique()) {
            case UnboxedInt32InGPR:
                if (recovery.gpr() != alreadyBoxed)
                    m_jit.or64(GPRInfo::tagTypeNumberRegister, recovery.gpr());
                break;

            case AlreadyInJSStackAsUnboxedInt32:
                m_jit.store32(AssemblyHelpers::TrustedImm32(static_cast<uint32_t>(TagTypeNumber >> 32)), AssemblyHelpers::tagFor(static_cast<VirtualRegister>(operands.operandForIndex(index))));
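                // Editorial note: the 32-bit payload is already in place on the stack,
                // so only the tag word needs fixing. TagTypeNumber >> 32 is 0xFFFF0000,
                // and writing it into the tag half of the slot turns the slot into a
                // boxed int32 JSValue without touching the payload.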
                break;

            case UInt32InGPR: {
                // This occurs when the speculative JIT left an unsigned 32-bit integer
                // in a GPR. If it's positive, we can just box the int. Otherwise we
                // need to turn it into a boxed double.

                // We don't try to be clever with register allocation here; we assume
                // that the program is using FPRs and we don't try to figure out which
                // ones it is using. Instead just temporarily save fpRegT0 and then
                // restore it. This makes sense because this path is not cheap to begin
                // with, and should happen very rarely.
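                // Worked example (editorial): a GPR holding 0x80000000 reads as
                // -2147483648 when treated as int32, so the positive branch is not
                // taken; convertInt32ToDouble produces -2147483648.0, and adding 2^32
                // (4294967296.0) yields 2147483648.0, the intended unsigned value,
                // which is then boxed as a double.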
                GPRReg addressGPR = GPRInfo::regT0;
                if (addressGPR == recovery.gpr())
                    addressGPR = GPRInfo::regT1;

                m_jit.store64(addressGPR, scratchDataBuffer);
                m_jit.move(AssemblyHelpers::TrustedImmPtr(scratchDataBuffer + 1), addressGPR);
                m_jit.storeDouble(FPRInfo::fpRegT0, addressGPR);

                AssemblyHelpers::Jump positive = m_jit.branch32(AssemblyHelpers::GreaterThanOrEqual, recovery.gpr(), AssemblyHelpers::TrustedImm32(0));

                DEFINE_REMOTE_VAR_PTR(double, twoToThe32);
                m_jit.convertInt32ToDouble(recovery.gpr(), FPRInfo::fpRegT0);
                m_jit.addDouble(AssemblyHelpers::AbsoluteAddress(REMOTE_VAR_PTR(twoToThe32)), FPRInfo::fpRegT0);
                m_jit.boxDouble(FPRInfo::fpRegT0, recovery.gpr());

                AssemblyHelpers::Jump done = m_jit.jump();

                positive.link(&m_jit);

                m_jit.or64(GPRInfo::tagTypeNumberRegister, recovery.gpr());

                done.link(&m_jit);

                m_jit.loadDouble(addressGPR, FPRInfo::fpRegT0);
                m_jit.load64(scratchDataBuffer, addressGPR);
                break;
            }

            default:
                break;
            }
        }
    }

    // 6) Dump all non-poisoned GPRs. For poisoned GPRs, save them into the scratch storage.
    //    Note that GPRs do not have a fast check (like haveFPRs) because we expect that
    //    most OSR failure points will have at least one GPR that needs to be dumped.

    initializePoisoned(operands.numberOfLocals());
    unsigned currentPoisonIndex = 0;

    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];
        int operand = operands.operandForIndex(index);
        switch (recovery.technique()) {
        case InGPR:
        case UnboxedInt32InGPR:
        case UInt32InGPR:
            if (operands.isVariable(index) && poisonedVirtualRegisters[operands.variableForIndex(index)]) {
                m_jit.store64(recovery.gpr(), scratchDataBuffer + currentPoisonIndex);
                m_poisonScratchIndices[operands.variableForIndex(index)] = currentPoisonIndex;
                currentPoisonIndex++;
            } else
                m_jit.store64(recovery.gpr(), AssemblyHelpers::addressFor((VirtualRegister)operand));
            break;
        default:
            break;
        }
    }

    // At this point all GPRs are available for scratch use.

    if (haveFPRs) {
        // 7) Box all doubles (relies on there being more GPRs than FPRs)

        for (size_t index = 0; index < operands.size(); ++index) {
            const ValueRecovery& recovery = operands[index];
            if (recovery.technique() != InFPR)
                continue;
            FPRReg fpr = recovery.fpr();
            GPRReg gpr = GPRInfo::toRegister(FPRInfo::toIndex(fpr));
            m_jit.boxDouble(fpr, gpr);
        }

        // 8) Dump all doubles into the stack, or to the scratch storage if
        //    the destination virtual register is poisoned.

        for (size_t index = 0; index < operands.size(); ++index) {
            const ValueRecovery& recovery = operands[index];
            if (recovery.technique() != InFPR)
                continue;
            GPRReg gpr = GPRInfo::toRegister(FPRInfo::toIndex(recovery.fpr()));
            if (operands.isVariable(index) && poisonedVirtualRegisters[operands.variableForIndex(index)]) {
                m_jit.store64(gpr, scratchDataBuffer + currentPoisonIndex);
                m_poisonScratchIndices[operands.variableForIndex(index)] = currentPoisonIndex;
                currentPoisonIndex++;
            } else
                m_jit.store64(gpr, AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
        }
    }

    // At this point all GPRs and FPRs are available for scratch use.

    // 9) Box all unboxed doubles in the stack.
    if (haveUnboxedDoubles) {
        for (size_t index = 0; index < operands.size(); ++index) {
            const ValueRecovery& recovery = operands[index];
            if (recovery.technique() != AlreadyInJSStackAsUnboxedDouble)
                continue;
            m_jit.loadDouble(AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)), FPRInfo::fpRegT0);
            m_jit.boxDouble(FPRInfo::fpRegT0, GPRInfo::regT0);
            m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
        }
    }

    ASSERT(currentPoisonIndex == numberOfPoisonedVirtualRegisters);

    // 10) Reshuffle displaced virtual registers. Optimize for the case that
    //     the number of displaced virtual registers is not more than the number
    //     of available physical registers.

    if (numberOfDisplacedVirtualRegisters) {
        if (numberOfDisplacedVirtualRegisters <= GPRInfo::numberOfRegisters) {
            // So far this appears to be the case that triggers all the time, but
            // that is far from guaranteed.

            unsigned displacementIndex = 0;
            for (size_t index = 0; index < operands.size(); ++index) {
                const ValueRecovery& recovery = operands[index];
                switch (recovery.technique()) {
                case DisplacedInJSStack:
                    m_jit.load64(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
                    break;

                case Int32DisplacedInJSStack: {
                    GPRReg gpr = GPRInfo::toRegister(displacementIndex++);
                    m_jit.load32(AssemblyHelpers::addressFor(recovery.virtualRegister()), gpr);
                    m_jit.or64(GPRInfo::tagTypeNumberRegister, gpr);
                    break;
                }

                case DoubleDisplacedInJSStack: {
                    GPRReg gpr = GPRInfo::toRegister(displacementIndex++);
                    m_jit.load64(AssemblyHelpers::addressFor(recovery.virtualRegister()), gpr);
                    m_jit.sub64(GPRInfo::tagTypeNumberRegister, gpr);
                    break;
                }
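                // Editorial note on the boxing arithmetic: in the JSVALUE64 encoding a
                // double is stored as its raw bit pattern plus 2^48. Subtracting
                // TagTypeNumber (0xFFFF000000000000) is, modulo 2^64, the same as
                // adding 2^48, so the sub64 above boxes the raw double bits loaded
                // from the stack.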

                default:
                    break;
                }
            }

            displacementIndex = 0;
            for (size_t index = 0; index < operands.size(); ++index) {
                const ValueRecovery& recovery = operands[index];
                switch (recovery.technique()) {
                case DisplacedInJSStack:
                case Int32DisplacedInJSStack:
                case DoubleDisplacedInJSStack:
                    m_jit.store64(GPRInfo::toRegister(displacementIndex++), AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
                    break;

                default:
                    break;
                }
            }
        } else {
            // FIXME: This should use the shuffling algorithm that we use
            // for speculative->non-speculative jumps, if we ever discover that
            // some hot code with lots of live values that get displaced and
            // spilled really enjoys frequently failing speculation.

            // For now this code is engineered to be correct but probably not
            // super. In particular, it correctly handles cases where for example
            // the displacements are a permutation of the destination values, like
            //
            // 1 -> 2
            // 2 -> 1
            //
            // It accomplishes this by simply lifting all of the virtual registers
            // from their old (DFG JIT) locations and dropping them in a scratch
            // location in memory, and then transferring from that scratch location
            // to their new (old JIT) locations.

            unsigned scratchIndex = numberOfPoisonedVirtualRegisters;
            for (size_t index = 0; index < operands.size(); ++index) {
                const ValueRecovery& recovery = operands[index];
                switch (recovery.technique()) {
                case DisplacedInJSStack:
                    m_jit.load64(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::regT0);
                    m_jit.store64(GPRInfo::regT0, scratchDataBuffer + scratchIndex++);
                    break;

                case Int32DisplacedInJSStack: {
                    m_jit.load32(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::regT0);
                    m_jit.or64(GPRInfo::tagTypeNumberRegister, GPRInfo::regT0);
                    m_jit.store64(GPRInfo::regT0, scratchDataBuffer + scratchIndex++);
                    break;
                }

                case DoubleDisplacedInJSStack: {
                    m_jit.load64(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::regT0);
                    m_jit.sub64(GPRInfo::tagTypeNumberRegister, GPRInfo::regT0);
                    m_jit.store64(GPRInfo::regT0, scratchDataBuffer + scratchIndex++);
                    break;
                }

                default:
                    break;
                }
            }

            scratchIndex = numberOfPoisonedVirtualRegisters;
            for (size_t index = 0; index < operands.size(); ++index) {
                const ValueRecovery& recovery = operands[index];
                switch (recovery.technique()) {
                case DisplacedInJSStack:
                case Int32DisplacedInJSStack:
                case DoubleDisplacedInJSStack:
                    m_jit.load64(scratchDataBuffer + scratchIndex++, GPRInfo::regT0);
                    m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
                    break;

                default:
                    break;
                }
            }

            ASSERT(scratchIndex == numberOfPoisonedVirtualRegisters + numberOfDisplacedVirtualRegisters);
        }
    }

    // 11) Dump all poisoned virtual registers.

    if (numberOfPoisonedVirtualRegisters) {
        for (int virtualRegister = 0; virtualRegister < (int)operands.numberOfLocals(); ++virtualRegister) {
            if (!poisonedVirtualRegisters[virtualRegister])
                continue;

            const ValueRecovery& recovery = operands.local(virtualRegister);
            switch (recovery.technique()) {
            case InGPR:
            case UnboxedInt32InGPR:
            case UInt32InGPR:
            case InFPR:
                m_jit.load64(scratchDataBuffer + poisonIndex(virtualRegister), GPRInfo::regT0);
                m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)virtualRegister));
                break;

            default:
                break;
            }
        }
    }

    // 12) Dump all constants. Optimize for Undefined, since that's a constant we see
    //     often.

    if (haveConstants) {
        if (haveUndefined)
            m_jit.move(AssemblyHelpers::TrustedImm64(JSValue::encode(jsUndefined())), GPRInfo::regT0);

        for (size_t index = 0; index < operands.size(); ++index) {
            const ValueRecovery& recovery = operands[index];
            if (recovery.technique() != Constant)
                continue;
            if (recovery.constant().isUndefined())
                m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
            else
                m_jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(recovery.constant())), AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
        }
    }

    // 13) Adjust the old JIT's execute counter. Since we are exiting OSR, we know
    //     that all new calls into this code will go to the new JIT, so the execute
    //     counter only affects call frames that performed OSR exit and call frames
    //     that were still executing the old JIT at the time of another call frame's
    //     OSR exit. We want to ensure that the following is true:
    //
    //     (a) Code that performs an OSR exit gets a chance to reenter optimized
    //         code eventually, since optimized code is faster. But we don't
    //         want to do such reentry too aggressively (see (c) below).
    //
    //     (b) If there is code on the call stack that is still running the old
    //         JIT's code and has never OSR'd, then it should get a chance to
    //         perform OSR entry despite the fact that we've exited.
    //
    //     (c) Code that performs an OSR exit should not immediately retry OSR
    //         entry, since both forms of OSR are expensive. OSR entry is
    //         particularly expensive.
    //
    //     (d) Frequent OSR failures, even those that do not result in the code
    //         running in a hot loop, result in recompilation getting triggered.
    //
    //     To ensure (c), we'd like to set the execute counter to
    //     counterValueForOptimizeAfterWarmUp(). This seems like it would endanger
    //     (a) and (b), since then every OSR exit would delay the opportunity for
    //     every call frame to perform OSR entry. Essentially, if OSR exit happens
    //     frequently and the function has few loops, then the counter will never
    //     become non-negative and OSR entry will never be triggered. OSR entry
    //     will only happen if a loop gets hot in the old JIT, which does a pretty
    //     good job of ensuring (a) and (b). But that doesn't take care of (d),
    //     since each speculation failure would reset the execute counter.
    //     So we check here if the number of speculation failures is significantly
    //     larger than the number of successes (we want 90% success rate), and if
    //     there have been a large enough number of failures. If so, we set the
    //     counter to 0; otherwise we set the counter to
    //     counterValueForOptimizeAfterWarmUp().
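    // Editorial note (rough numbers only): with the 90% success target described
    // above, roughly one exit per ten successful executions is tolerated; once the
    // exit count climbs well past that ratio, and past a minimum absolute count, the
    // counter is zeroed so that the recompilation policy in (d) can kick in sooner.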

    handleExitCounts(exit);

    // 14) Reify inlined call frames.

    ASSERT(m_jit.baselineCodeBlock()->getJITType() == JITCode::BaselineJIT);
    m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(m_jit.baselineCodeBlock()), AssemblyHelpers::addressFor((VirtualRegister)JSStack::CodeBlock));
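    // Editorial summary of the loop below: for each inlined call frame, walking from
    // the innermost frame outward, a real call frame header is written at that
    // frame's stackOffset (CodeBlock, CallerFrame, ReturnPC, ArgumentCount and, for
    // non-closure calls, ScopeChain and Callee), with the return PC pointing at the
    // caller's baseline machine code just after the corresponding op_call.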

    for (CodeOrigin codeOrigin = exit.m_codeOrigin; codeOrigin.inlineCallFrame; codeOrigin = codeOrigin.inlineCallFrame->caller) {
        InlineCallFrame* inlineCallFrame = codeOrigin.inlineCallFrame;
        CodeBlock* baselineCodeBlock = m_jit.baselineCodeBlockFor(codeOrigin);
        CodeBlock* baselineCodeBlockForCaller = m_jit.baselineCodeBlockFor(inlineCallFrame->caller);
        Vector<BytecodeAndMachineOffset>& decodedCodeMap = m_jit.decodedCodeMapFor(baselineCodeBlockForCaller);
        unsigned returnBytecodeIndex = inlineCallFrame->caller.bytecodeIndex + OPCODE_LENGTH(op_call);
        BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned>(decodedCodeMap, decodedCodeMap.size(), returnBytecodeIndex, BytecodeAndMachineOffset::getBytecodeIndex);

        ASSERT(mapping);
        ASSERT(mapping->m_bytecodeIndex == returnBytecodeIndex);

        void* jumpTarget = baselineCodeBlockForCaller->getJITCode().executableAddressAtOffset(mapping->m_machineCodeOffset);

        GPRReg callerFrameGPR;
        if (inlineCallFrame->caller.inlineCallFrame) {
            m_jit.addPtr(AssemblyHelpers::TrustedImm32(inlineCallFrame->caller.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister, GPRInfo::regT3);
            callerFrameGPR = GPRInfo::regT3;
        } else
            callerFrameGPR = GPRInfo::callFrameRegister;

        m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CodeBlock)));
        if (!inlineCallFrame->isClosureCall())
            m_jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(JSValue(inlineCallFrame->callee->scope()))), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ScopeChain)));
        m_jit.store64(callerFrameGPR, AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CallerFrame)));
        m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ReturnPC)));
        m_jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
        if (!inlineCallFrame->isClosureCall())
            m_jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(JSValue(inlineCallFrame->callee.get()))), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
    }

    // 15) Create arguments if necessary and place them into the appropriate aliased
    //     registers.

    if (haveArguments) {
        HashSet<InlineCallFrame*, DefaultHash<InlineCallFrame*>::Hash,
            NullableHashTraits<InlineCallFrame*> > didCreateArgumentsObject;

        for (size_t index = 0; index < operands.size(); ++index) {
            const ValueRecovery& recovery = operands[index];
            if (recovery.technique() != ArgumentsThatWereNotCreated)
                continue;
            int operand = operands.operandForIndex(index);
            // Find the right inline call frame.
            InlineCallFrame* inlineCallFrame = 0;
            for (InlineCallFrame* current = exit.m_codeOrigin.inlineCallFrame;
                 current;
                 current = current->caller.inlineCallFrame) {
                if (current->stackOffset <= operand) {
                    inlineCallFrame = current;
                    break;
                }
            }

            if (!m_jit.baselineCodeBlockFor(inlineCallFrame)->usesArguments())
                continue;
            int argumentsRegister = m_jit.argumentsRegisterFor(inlineCallFrame);
            if (didCreateArgumentsObject.add(inlineCallFrame).isNewEntry) {
                // We know this call frame optimized out an arguments object that
                // the baseline JIT would have created. Do that creation now.
                if (inlineCallFrame) {
                    m_jit.addPtr(AssemblyHelpers::TrustedImm32(inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister, GPRInfo::regT0);
                    m_jit.setupArguments(GPRInfo::regT0);
                } else
                    m_jit.setupArgumentsExecState();
                m_jit.move(
                    AssemblyHelpers::TrustedImmPtr(
                        bitwise_cast<void*>(operationCreateArguments)),
                    GPRInfo::nonArgGPR0);
                m_jit.call(GPRInfo::nonArgGPR0);
                m_jit.store64(GPRInfo::returnValueGPR, AssemblyHelpers::addressFor(argumentsRegister));
                m_jit.store64(
                    GPRInfo::returnValueGPR,
                    AssemblyHelpers::addressFor(unmodifiedArgumentsRegister(argumentsRegister)));
                m_jit.move(GPRInfo::returnValueGPR, GPRInfo::regT0); // no-op move on almost all platforms.
            }

            m_jit.load64(AssemblyHelpers::addressFor(argumentsRegister), GPRInfo::regT0);
            m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(operand));
        }
    }

    // 16) Load the result of the last bytecode operation into regT0.

    if (exit.m_lastSetOperand != std::numeric_limits<int>::max())
        m_jit.load64(AssemblyHelpers::addressFor((VirtualRegister)exit.m_lastSetOperand), GPRInfo::cachedResultRegister);
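    // Editorial note: GPRInfo::cachedResultRegister is the register the baseline JIT
    // expects to hold the most recently produced result (regT0 on this configuration,
    // as the step heading above implies), so reloading the last-set operand into it
    // lets baseline code resume as if it had just executed that bytecode itself.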

    // 17) Adjust the call frame pointer.

    if (exit.m_codeOrigin.inlineCallFrame)
        m_jit.addPtr(AssemblyHelpers::TrustedImm32(exit.m_codeOrigin.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister);

    // 18) Jump into the corresponding baseline JIT code.

    CodeBlock* baselineCodeBlock = m_jit.baselineCodeBlockFor(exit.m_codeOrigin);
    Vector<BytecodeAndMachineOffset>& decodedCodeMap = m_jit.decodedCodeMapFor(baselineCodeBlock);

    BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned>(decodedCodeMap, decodedCodeMap.size(), exit.m_codeOrigin.bytecodeIndex, BytecodeAndMachineOffset::getBytecodeIndex);

    ASSERT(mapping);
    ASSERT(mapping->m_bytecodeIndex == exit.m_codeOrigin.bytecodeIndex);

    void* jumpTarget = baselineCodeBlock->getJITCode().executableAddressAtOffset(mapping->m_machineCodeOffset);

    ASSERT(GPRInfo::regT1 != GPRInfo::cachedResultRegister);

    m_jit.move(AssemblyHelpers::TrustedImmPtr(jumpTarget), GPRInfo::regT1);
    m_jit.jump(GPRInfo::regT1);

#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("-> %p\n", jumpTarget);
#endif
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT) && USE(JSVALUE64)