LowLevelInterpreter.cpp

/*
 * Copyright (C) 2012 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"
#include "LowLevelInterpreter.h"

#if ENABLE(LLINT)

#include "LLIntOfflineAsmConfig.h"
#include <wtf/InlineASM.h>

#if ENABLE(LLINT_C_LOOP)

#include "CodeBlock.h"
#include "LLIntCLoop.h"
#include "LLIntSlowPaths.h"
#include "Operations.h"
#include "VMInspector.h"
#include <wtf/Assertions.h>
#include <wtf/MathExtras.h>

using namespace JSC::LLInt;
// LLInt C Loop opcodes
// ====================
// In the implementation of the C loop, the LLInt trampoline glue functions
// (e.g. llint_program_prologue, llint_eval_prologue, etc) are addressed as
// if they are bytecode handlers. That means the names of the trampoline
// functions will be added to the OpcodeID list via the
// FOR_EACH_LLINT_OPCODE_EXTENSION() macro that FOR_EACH_OPCODE_ID()
// includes.
//
// In addition, some JIT trampoline functions which are needed by LLInt
// (e.g. getHostCallReturnValue, ctiOpThrowNotCaught) are also added as
// bytecodes, and the CLoop will provide bytecode handlers for them.
//
// In the CLoop, we can only dispatch indirectly to these bytecodes
// (including the LLInt and JIT extensions). All other dispatches
// (i.e. gotos) must be to a known label (i.e. local / global labels).
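//
// For illustration, the extension list has the same shape as the regular
// opcode list. The following is only a sketch (see LLIntOpcode.h for the
// real entries):
//
//     #define FOR_EACH_LLINT_OPCODE_EXTENSION(macro) \
//         macro(llint_program_prologue, 1)           \
//         macro(llint_eval_prologue, 1)              \
//         ...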
// How are the opcodes named?
// ==========================
// Here is a table showing how each manifestation of the opcodes is named:
//
//   Type:                     Opcode             Trampoline Glue
//                             ======             ===============
//   [In the llint .asm files]
//   llint labels:             llint_op_enter     llint_program_prologue
//
//   OpcodeID:                 op_enter           llint_program
//                             [in Opcode.h]      [in LLIntOpcode.h]
//
//   When using a switch statement dispatch in the CLoop, each "opcode" is
//   a case statement:
//   Opcode:                   case op_enter:     case llint_program_prologue:
//
//   When using a computed goto dispatch in the CLoop, each opcode is a label:
//   Opcode:                   op_enter:          llint_program_prologue:
//============================================================================
// Define the opcode dispatch mechanism when using the C loop:
//
// These are for building a C Loop interpreter:
#define OFFLINE_ASM_BEGIN
#define OFFLINE_ASM_END

#define OFFLINE_ASM_OPCODE_LABEL(opcode) DEFINE_OPCODE(opcode)
#if ENABLE(COMPUTED_GOTO_OPCODES)
#define OFFLINE_ASM_GLUE_LABEL(label)  label:
#else
#define OFFLINE_ASM_GLUE_LABEL(label)  case label: label:
#endif
#define OFFLINE_ASM_LOCAL_LABEL(label) label:
//============================================================================
// Some utilities:
//

namespace JSC {
namespace LLInt {

#if USE(JSVALUE32_64)
static double Ints2Double(uint32_t lo, uint32_t hi)
{
    union {
        double dval;
        uint64_t ival64;
    } u;
    u.ival64 = (static_cast<uint64_t>(hi) << 32) | lo;
    return u.dval;
}

static void Double2Ints(double val, uint32_t& lo, uint32_t& hi)
{
    union {
        double dval;
        uint64_t ival64;
    } u;
    u.dval = val;
    hi = static_cast<uint32_t>(u.ival64 >> 32);
    lo = static_cast<uint32_t>(u.ival64);
}
#endif // USE(JSVALUE32_64)

} // namespace LLInt
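
// A minimal sketch of how these helpers round-trip a double through two
// 32-bit halves (hypothetical standalone usage, not part of this file):
//
//     uint32_t lo, hi;
//     Double2Ints(3.5, lo, hi);              // split the 64-bit bit image
//     double restored = Ints2Double(lo, hi); // reassemble; restored == 3.5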
//============================================================================
// CLoopRegister is the storage for an emulated CPU register.
// It defines the policy of how ints smaller than intptr_t are packed into the
// pseudo register, as well as hides endianness differences.

struct CLoopRegister {
    union {
        intptr_t i;
        uintptr_t u;
#if USE(JSVALUE64)
#if CPU(BIG_ENDIAN)
        struct {
            int32_t i32padding;
            int32_t i32;
        };
        struct {
            uint32_t u32padding;
            uint32_t u32;
        };
        struct {
            int8_t i8padding[7];
            int8_t i8;
        };
        struct {
            uint8_t u8padding[7];
            uint8_t u8;
        };
#else // !CPU(BIG_ENDIAN)
        struct {
            int32_t i32;
            int32_t i32padding;
        };
        struct {
            uint32_t u32;
            uint32_t u32padding;
        };
        struct {
            int8_t i8;
            int8_t i8padding[7];
        };
        struct {
            uint8_t u8;
            uint8_t u8padding[7];
        };
#endif // !CPU(BIG_ENDIAN)

#else // !USE(JSVALUE64)
        int32_t i32;
        uint32_t u32;

#if CPU(BIG_ENDIAN)
        struct {
            int8_t i8padding[3];
            int8_t i8;
        };
        struct {
            uint8_t u8padding[3];
            uint8_t u8;
        };
#else // !CPU(BIG_ENDIAN)
        struct {
            int8_t i8;
            int8_t i8padding[3];
        };
        struct {
            uint8_t u8;
            uint8_t u8padding[3];
        };
#endif // !CPU(BIG_ENDIAN)
#endif // !USE(JSVALUE64)

        int8_t* i8p;
        void* vp;
        ExecState* execState;
        void* instruction;
        NativeFunction nativeFunc;
#if USE(JSVALUE64)
        int64_t i64;
        uint64_t u64;
        EncodedJSValue encodedJSValue;
        double castToDouble;
#endif
        Opcode opcode;
    };

#if USE(JSVALUE64)
    inline void clearHighWord() { i32padding = 0; }
#else
    inline void clearHighWord() { }
#endif
};
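
// A minimal sketch of the packing policy (assuming a 64-bit little-endian
// build): the narrow members overlay the low-order bytes of the full word,
// and clearHighWord() zeroes the upper half after a 32-bit value is stored:
//
//     CLoopRegister r;
//     r.u = 0xffffffff00000042ull;
//     uint32_t low = r.u32; // 0x42: u32 overlays the low 32 bits
//     r.clearHighWord();    // now r.u == 0x42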
//============================================================================
// The llint C++ interpreter loop:
//

JSValue CLoop::execute(CallFrame* callFrame, OpcodeID bootstrapOpcodeId,
                       bool isInitializationPass)
{
#define CAST reinterpret_cast
#define SIGN_BIT32(x) ((x) & 0x80000000)

    // One-time initialization of our address tables. We have to put this code
    // here because our labels are only in scope inside this function. The
    // caller (or one of its ancestors) is responsible for ensuring that this
    // is only called once during the initialization of the VM before threads
    // are at play.
    if (UNLIKELY(isInitializationPass)) {
#if ENABLE(COMPUTED_GOTO_OPCODES)
        Opcode* opcodeMap = LLInt::opcodeMap();
#define OPCODE_ENTRY(__opcode, length) \
        opcodeMap[__opcode] = bitwise_cast<void*>(&&__opcode);
        FOR_EACH_OPCODE_ID(OPCODE_ENTRY)
#undef OPCODE_ENTRY

#define LLINT_OPCODE_ENTRY(__opcode, length) \
        opcodeMap[__opcode] = bitwise_cast<void*>(&&__opcode);
        FOR_EACH_LLINT_NATIVE_HELPER(LLINT_OPCODE_ENTRY)
#undef LLINT_OPCODE_ENTRY
#endif
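        // For illustration, for op_enter the OPCODE_ENTRY macro above
        // expands to (a sketch of the one-line result):
        //     opcodeMap[op_enter] = bitwise_cast<void*>(&&op_enter);
        // i.e. the address of the op_enter label below is recorded so that
        // the computed goto dispatch can jump to it indirectly.
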
        // Note: we can only set the exceptionInstructions after we have
        // initialized the opcodeMap above. This is because getCodePtr()
        // can depend on the opcodeMap.
        Instruction* exceptionInstructions = LLInt::exceptionInstructions();
        for (int i = 0; i < maxOpcodeLength + 1; ++i)
            exceptionInstructions[i].u.pointer =
                LLInt::getCodePtr(llint_throw_from_slow_path_trampoline);

        return JSValue();
    }
    ASSERT(callFrame->vm().topCallFrame == callFrame);

    // Define the pseudo registers used by the LLINT C Loop backend:
    ASSERT(sizeof(CLoopRegister) == sizeof(intptr_t));

    union CLoopDoubleRegister {
        double d;
#if USE(JSVALUE64)
        int64_t castToInt64;
#endif
    };
    // The CLoop llint backend is initially based on the ARMv7 backend, and
    // then further enhanced with a few instructions from the x86 backend to
    // support building for X64 targets. Hence, the shape of the generated
    // code and the usage convention of registers will look a lot like the
    // ARMv7 backend's.
    //
    // For example, on a 32-bit build:
    // 1. Outgoing args will be set up as follows:
    //    arg1 in t0 (r0 on ARM)
    //    arg2 in t1 (r1 on ARM)
    // 2. 32-bit return values will be in t0 (r0 on ARM).
    // 3. 64-bit return values (e.g. doubles) will be in t0,t1 (r0,r1 on ARM).
    //
    // But instead of naming these simulator registers based on their ARM
    // counterparts, we'll name them based on their original llint asm names.
    // This will make it easier to correlate the generated code with the
    // original llint asm code.
    //
    // On a 64-bit build, it is more like x64 in that the registers are
    // 64 bits wide. Hence:
    // 1. Outgoing args are still the same: arg1 in t0, arg2 in t1, etc.
    // 2. 32-bit result values will be in the low 32 bits of t0.
    // 3. 64-bit result values will be in t0.
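    //
    // As a sketch of this convention (hypothetical, for illustration only),
    // a generated 64-bit sequence that passes one argument and consumes a
    // 32-bit result looks like:
    //     t0.i = 42;               // arg1 goes in t0
    //     // ... dispatch to a helper ...
    //     int32_t result = t0.i32; // 32-bit result read from t0's low word
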
    CLoopRegister t0, t1, t2, t3;
#if USE(JSVALUE64)
    CLoopRegister rBasePC, tagTypeNumber, tagMask;
#endif
    CLoopRegister rRetVPC;
    CLoopDoubleRegister d0, d1;

    // Keep the compiler happy: we don't really need these initializations,
    // but without them the compiler will warn. This makes the warning go
    // away.
    t0.i = 0;
    t1.i = 0;

    // Instantiate the pseudo JIT stack frame used by the LLINT C Loop backend:
    JITStackFrame jitStackFrame;

    // The llint expects the native stack pointer, sp, to be pointing to the
    // jitStackFrame (which is the simulation of the native stack frame):
    JITStackFrame* const sp = &jitStackFrame;
    sp->vm = &callFrame->vm();

    // Set up an alias for the vm ptr in the JITStackFrame:
    VM*& vm = sp->vm;

    CodeBlock* codeBlock = callFrame->codeBlock();
    Instruction* vPC;

    // rPC is an alias for vPC. Set up the alias:
    CLoopRegister& rPC = *CAST<CLoopRegister*>(&vPC);

#if USE(JSVALUE32_64)
    vPC = codeBlock->instructions().begin();
#else // USE(JSVALUE64)
    vPC = 0;
    rBasePC.vp = codeBlock->instructions().begin();

    // For the ASM llint, JITStubs takes care of this initialization. We do
    // it explicitly here for the C loop:
    tagTypeNumber.i = 0xFFFF000000000000;
    tagMask.i = 0xFFFF000000000002;
#endif // USE(JSVALUE64)

    // cfr is an alias for callFrame. Set up this alias:
    CLoopRegister& cfr = *CAST<CLoopRegister*>(&callFrame);

    // Simulate a native return PC which should never be used:
    rRetVPC.i = 0xbbadbeef;

    // Interpreter variables for value passing between opcodes and/or helpers:
    NativeFunction nativeFunc = 0;
    JSValue functionReturnValue;
    Opcode opcode;

    opcode = LLInt::getOpcode(bootstrapOpcodeId);
#if ENABLE(OPCODE_STATS)
#define RECORD_OPCODE_STATS(__opcode) \
    OpcodeStats::recordInstruction(__opcode)
#else
#define RECORD_OPCODE_STATS(__opcode)
#endif

#if USE(JSVALUE32_64)
#define FETCH_OPCODE() vPC->u.opcode
#else // USE(JSVALUE64)
#define FETCH_OPCODE() *bitwise_cast<Opcode*>(rBasePC.i8p + rPC.i * 8)
#endif // USE(JSVALUE64)

#define NEXT_INSTRUCTION()           \
    do {                             \
        opcode = FETCH_OPCODE();     \
        DISPATCH_OPCODE();           \
    } while (false)
#if ENABLE(COMPUTED_GOTO_OPCODES)

    //========================================================================
    // Loop dispatch mechanism using computed goto statements:

#define DISPATCH_OPCODE() goto *opcode

#define DEFINE_OPCODE(__opcode) \
    __opcode:                   \
        RECORD_OPCODE_STATS(__opcode);

    // Dispatch to the current PC's bytecode:
    DISPATCH_OPCODE();

#else // !ENABLE(COMPUTED_GOTO_OPCODES)

    //========================================================================
    // Loop dispatch mechanism using a C switch statement:

#define DISPATCH_OPCODE() goto dispatchOpcode

#define DEFINE_OPCODE(__opcode) \
    case __opcode:              \
    __opcode:                   \
        RECORD_OPCODE_STATS(__opcode);

    // Dispatch to the current PC's bytecode:
    dispatchOpcode:
    switch (opcode)

#endif // !ENABLE(COMPUTED_GOTO_OPCODES)
    //========================================================================
    // Bytecode handlers:
    {
        // This is the file generated by offlineasm, which contains all of the
        // bytecode handlers for the interpreter, as compiled from
        // LowLevelInterpreter.asm and its peers.
        #include "LLIntAssembly.h"
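
        // For illustration, each handler emitted into LLIntAssembly.h has
        // roughly this shape (a sketch, not the real generated code):
        //     OFFLINE_ASM_OPCODE_LABEL(op_enter)
        //     {
        //         // ... C++ statements compiled from the .asm bytecode ...
        //         NEXT_INSTRUCTION();
        //     }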
        // In the ASM llint, getHostCallReturnValue() is a glue function
        // provided by the JIT (see dfg/DFGOperations.cpp).
        // We simulate it here with a pseudo-opcode handler.
        OFFLINE_ASM_GLUE_LABEL(getHostCallReturnValue)
        {
            // The ASM part pops the frame:
            callFrame = callFrame->callerFrame();

            // The part in getHostCallReturnValueWithExecState():
            JSValue result = vm->hostCallReturnValue;
#if USE(JSVALUE32_64)
            t1.i = result.tag();
            t0.i = result.payload();
#else
            t0.encodedJSValue = JSValue::encode(result);
#endif
            goto doReturnHelper;
        }

        OFFLINE_ASM_GLUE_LABEL(ctiOpThrowNotCaught)
        {
            return vm->exception;
        }

#if !ENABLE(COMPUTED_GOTO_OPCODES)
    default:
        ASSERT(false);
#endif

    } // END bytecode handler cases.
    //========================================================================
    // Bytecode helpers:

    doReturnHelper: {
        ASSERT(!!callFrame);
        if (callFrame->hasHostCallFrameFlag()) {
#if USE(JSVALUE32_64)
            return JSValue(t1.i, t0.i); // returning JSValue(tag, payload);
#else
            return JSValue::decode(t0.encodedJSValue);
#endif
        }

        // The normal ASM llint call implementation returns to the caller as
        // recorded in rRetVPC, and the caller would fetch the return address
        // from ArgumentCount.tag() (see the dispatchAfterCall() macro used in
        // the callTargetFunction() macro in the llint asm files).
        //
        // For the C loop, we don't have the JIT stub to do this work for us.
        // So, we need to implement the equivalent of dispatchAfterCall() here
        // before dispatching to the PC.

        vPC = callFrame->currentVPC();

#if USE(JSVALUE64)
        // Based on LowLevelInterpreter64.asm's dispatchAfterCall():
        // When returning from a native trampoline call, unlike the assembly
        // LLInt, we can't simply return to the caller. In our case, we grab
        // the caller's VPC and resume execution there. However, the caller's
        // VPC returned by callFrame->currentVPC() is in the form of the real
        // address of the target bytecode, but the 64-bit llint expects the
        // VPC to be a bytecode offset. Hence, we need to map it back to a
        // bytecode offset before we dispatch via the usual dispatch mechanism,
        // i.e. NEXT_INSTRUCTION():
        codeBlock = callFrame->codeBlock();
        ASSERT(codeBlock);
        rPC.vp = callFrame->currentVPC();
        rPC.i = rPC.i8p - reinterpret_cast<int8_t*>(codeBlock->instructions().begin());
        rPC.i >>= 3;

        rBasePC.vp = codeBlock->instructions().begin();
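
        // For example (a sketch with hypothetical numbers): if currentVPC()
        // points 24 bytes past instructions().begin(), the subtraction above
        // yields rPC.i == 24 and the >> 3 turns it into bytecode offset 3,
        // which FETCH_OPCODE() then scales back up via rPC.i * 8.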
#endif // USE(JSVALUE64)

        NEXT_INSTRUCTION();

    } // END doReturnHelper.
    // Keep the compiler happy so that it doesn't complain about unused
    // labels for the LLInt trampoline glue. The labels are automatically
    // emitted by label macros above, and some of them are referenced by
    // the llint generated code. Since we can't tell ahead of time which
    // will be referenced and which will not, we'll just pacify the
    // compiler on all such labels:
#define LLINT_OPCODE_ENTRY(__opcode, length) \
    UNUSED_LABEL(__opcode);
    FOR_EACH_OPCODE_ID(LLINT_OPCODE_ENTRY);
#undef LLINT_OPCODE_ENTRY

#undef NEXT_INSTRUCTION
#undef DEFINE_OPCODE
#undef CHECK_FOR_TIMEOUT
#undef CAST
#undef SIGN_BIT32

} // Interpreter::llintCLoopExecute()

} // namespace JSC
#else // !ENABLE(LLINT_C_LOOP)

//============================================================================
// Define the opcode dispatch mechanism when using an ASM loop:
//
// These are for building an interpreter from generated assembly code:
#define OFFLINE_ASM_BEGIN   asm (
#define OFFLINE_ASM_END     );

#define OFFLINE_ASM_OPCODE_LABEL(__opcode) OFFLINE_ASM_GLOBAL_LABEL(llint_##__opcode)
#define OFFLINE_ASM_GLUE_LABEL(__opcode)   OFFLINE_ASM_GLOBAL_LABEL(__opcode)

#if CPU(ARM_THUMB2)
#define OFFLINE_ASM_GLOBAL_LABEL(label)          \
    ".globl " SYMBOL_STRING(label) "\n"          \
    HIDE_SYMBOL(label) "\n"                      \
    ".thumb\n"                                   \
    ".thumb_func " THUMB_FUNC_PARAM(label) "\n"  \
    SYMBOL_STRING(label) ":\n"
#else
#define OFFLINE_ASM_GLOBAL_LABEL(label)  \
    ".globl " SYMBOL_STRING(label) "\n"  \
    HIDE_SYMBOL(label) "\n"              \
    SYMBOL_STRING(label) ":\n"
#endif

#define OFFLINE_ASM_LOCAL_LABEL(label) LOCAL_LABEL_STRING(label) ":\n"
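
// For illustration, on a non-Thumb target OFFLINE_ASM_OPCODE_LABEL(op_enter)
// expands to roughly this inline-asm fragment (a sketch):
//     ".globl " SYMBOL_STRING(llint_op_enter) "\n"
//     HIDE_SYMBOL(llint_op_enter) "\n"
//     SYMBOL_STRING(llint_op_enter) ":\n"
// i.e. a global assembler label that callers and the JIT can jump to.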
// This is a file generated by offlineasm, which contains all of the assembly code
// for the interpreter, as compiled from LowLevelInterpreter.asm.
#include "LLIntAssembly.h"

#endif // !ENABLE(LLINT_C_LOOP)

#endif // ENABLE(LLINT)