- /*
- * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
- #include "config.h"
- #include "DFGByteCodeParser.h"
- #if ENABLE(DFG_JIT)
- #include "ArrayConstructor.h"
- #include "CallLinkStatus.h"
- #include "CodeBlock.h"
- #include "CodeBlockWithJITType.h"
- #include "DFGArrayMode.h"
- #include "DFGCapabilities.h"
- #include "GetByIdStatus.h"
- #include "Operations.h"
- #include "PreciseJumpTargets.h"
- #include "PutByIdStatus.h"
- #include "ResolveGlobalStatus.h"
- #include "StringConstructor.h"
- #include <wtf/CommaPrinter.h>
- #include <wtf/HashMap.h>
- #include <wtf/MathExtras.h>
- namespace JSC { namespace DFG {
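- // ConstantBufferKey identifies a constant buffer by its owning CodeBlock and its
- // index within that block; it serves as the key type of m_constantBufferCache below.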
- class ConstantBufferKey {
- public:
- ConstantBufferKey()
- : m_codeBlock(0)
- , m_index(0)
- {
- }
-
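- // The hash-table deleted value is encoded as (null codeBlock, index 1); the
- // default-constructed key (null, 0) is the empty value. isHashTableDeletedValue()
- // below relies on this encoding.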
- ConstantBufferKey(WTF::HashTableDeletedValueType)
- : m_codeBlock(0)
- , m_index(1)
- {
- }
-
- ConstantBufferKey(CodeBlock* codeBlock, unsigned index)
- : m_codeBlock(codeBlock)
- , m_index(index)
- {
- }
-
- bool operator==(const ConstantBufferKey& other) const
- {
- return m_codeBlock == other.m_codeBlock
- && m_index == other.m_index;
- }
-
- unsigned hash() const
- {
- return WTF::PtrHash<CodeBlock*>::hash(m_codeBlock) ^ m_index;
- }
-
- bool isHashTableDeletedValue() const
- {
- return !m_codeBlock && m_index;
- }
-
- CodeBlock* codeBlock() const { return m_codeBlock; }
- unsigned index() const { return m_index; }
-
- private:
- CodeBlock* m_codeBlock;
- unsigned m_index;
- };
- struct ConstantBufferKeyHash {
- static unsigned hash(const ConstantBufferKey& key) { return key.hash(); }
- static bool equal(const ConstantBufferKey& a, const ConstantBufferKey& b)
- {
- return a == b;
- }
-
- static const bool safeToCompareToEmptyOrDeleted = true;
- };
- } } // namespace JSC::DFG
- namespace WTF {
- template<typename T> struct DefaultHash;
- template<> struct DefaultHash<JSC::DFG::ConstantBufferKey> {
- typedef JSC::DFG::ConstantBufferKeyHash Hash;
- };
- template<typename T> struct HashTraits;
- template<> struct HashTraits<JSC::DFG::ConstantBufferKey> : SimpleClassHashTraits<JSC::DFG::ConstantBufferKey> { };
- } // namespace WTF
- namespace JSC { namespace DFG {
- // === ByteCodeParser ===
- //
- // This class is used to compile the dataflow graph from a CodeBlock.
- class ByteCodeParser {
- public:
- ByteCodeParser(Graph& graph)
- : m_vm(&graph.m_vm)
- , m_codeBlock(graph.m_codeBlock)
- , m_profiledBlock(graph.m_profiledBlock)
- , m_graph(graph)
- , m_currentBlock(0)
- , m_currentIndex(0)
- , m_currentProfilingIndex(0)
- , m_constantUndefined(UINT_MAX)
- , m_constantNull(UINT_MAX)
- , m_constantNaN(UINT_MAX)
- , m_constant1(UINT_MAX)
- , m_constants(m_codeBlock->numberOfConstantRegisters())
- , m_numArguments(m_codeBlock->numParameters())
- , m_numLocals(m_codeBlock->m_numCalleeRegisters)
- , m_preservedVars(m_codeBlock->m_numVars)
- , m_parameterSlots(0)
- , m_numPassedVarArgs(0)
- , m_inlineStackTop(0)
- , m_haveBuiltOperandMaps(false)
- , m_emptyJSValueIndex(UINT_MAX)
- , m_currentInstruction(0)
- {
- ASSERT(m_profiledBlock);
-
- for (int i = 0; i < m_codeBlock->m_numVars; ++i)
- m_preservedVars.set(i);
- }
-
- // Parse a full CodeBlock of bytecode.
- bool parse();
-
- private:
- struct InlineStackEntry;
- // Just parse from m_currentIndex to the end of the current CodeBlock.
- void parseCodeBlock();
- // Helper for min and max.
- bool handleMinMax(bool usesResult, int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis);
-
- // Handle calls. This resolves issues surrounding inlining and intrinsics.
- void handleCall(Interpreter*, Instruction* currentInstruction, NodeType op, CodeSpecializationKind);
- void emitFunctionChecks(const CallLinkStatus&, Node* callTarget, int registerOffset, CodeSpecializationKind);
- void emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind);
- // Handle inlining. Return true if it succeeded, false if we need to plant a call.
- bool handleInlining(bool usesResult, Node* callTargetNode, int resultOperand, const CallLinkStatus&, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, CodeSpecializationKind);
- // Handle setting the result of an intrinsic.
- void setIntrinsicResult(bool usesResult, int resultOperand, Node*);
- // Handle intrinsic functions. Return true if it succeeded, false if we need to plant a call.
- bool handleIntrinsic(bool usesResult, int resultOperand, Intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction);
- bool handleConstantInternalFunction(bool usesResult, int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, CodeSpecializationKind);
- Node* handleGetByOffset(SpeculatedType, Node* base, unsigned identifierNumber, PropertyOffset);
- void handleGetByOffset(
- int destinationOperand, SpeculatedType, Node* base, unsigned identifierNumber,
- PropertyOffset);
- void handleGetById(
- int destinationOperand, SpeculatedType, Node* base, unsigned identifierNumber,
- const GetByIdStatus&);
- Node* getScope(bool skipTop, unsigned skipCount);
-
- // Convert a set of ResolveOperations into graph nodes
- bool parseResolveOperations(SpeculatedType, unsigned identifierNumber, ResolveOperations*, PutToBaseOperation*, Node** base, Node** value);
- // Prepare to parse a block.
- void prepareToParseBlock();
- // Parse a single basic block of bytecode instructions.
- bool parseBlock(unsigned limit);
- // Link block successors.
- void linkBlock(BasicBlock*, Vector<BlockIndex>& possibleTargets);
- void linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BlockIndex>& possibleTargets);
-
- VariableAccessData* newVariableAccessData(int operand, bool isCaptured)
- {
- ASSERT(operand < FirstConstantRegisterIndex);
-
- m_graph.m_variableAccessData.append(VariableAccessData(static_cast<VirtualRegister>(operand), isCaptured));
- return &m_graph.m_variableAccessData.last();
- }
-
- // Get/Set the operands/result of a bytecode instruction.
- Node* getDirect(int operand)
- {
- // Is this a constant?
- if (operand >= FirstConstantRegisterIndex) {
- unsigned constant = operand - FirstConstantRegisterIndex;
- ASSERT(constant < m_constants.size());
- return getJSConstant(constant);
- }
- ASSERT(operand != JSStack::Callee);
-
- // Is this an argument?
- if (operandIsArgument(operand))
- return getArgument(operand);
- // Must be a local.
- return getLocal((unsigned)operand);
- }
- Node* get(int operand)
- {
- if (operand == JSStack::Callee) {
- if (inlineCallFrame() && inlineCallFrame()->callee)
- return cellConstant(inlineCallFrame()->callee.get());
-
- return getCallee();
- }
-
- return getDirect(m_inlineStackTop->remapOperand(operand));
- }
- enum SetMode { NormalSet, SetOnEntry };
- void setDirect(int operand, Node* value, SetMode setMode = NormalSet)
- {
- // Is this an argument?
- if (operandIsArgument(operand)) {
- setArgument(operand, value, setMode);
- return;
- }
- // Must be a local.
- setLocal((unsigned)operand, value, setMode);
- }
- void set(int operand, Node* value, SetMode setMode = NormalSet)
- {
- setDirect(m_inlineStackTop->remapOperand(operand), value, setMode);
- }
-
- void setPair(int operand1, Node* value1, int operand2, Node* value2)
- {
- // First emit dead SetLocals for the benefit of OSR.
- set(operand1, value1);
- set(operand2, value2);
-
- // Now emit the real SetLocals.
- set(operand1, value1);
- set(operand2, value2);
- }
-
- Node* injectLazyOperandSpeculation(Node* node)
- {
- ASSERT(node->op() == GetLocal);
- ASSERT(node->codeOrigin.bytecodeIndex == m_currentIndex);
- SpeculatedType prediction =
- m_inlineStackTop->m_lazyOperands.prediction(
- LazyOperandValueProfileKey(m_currentIndex, node->local()));
- #if DFG_ENABLE(DEBUG_VERBOSE)
- dataLog("Lazy operand [@", node->index(), ", bc#", m_currentIndex, ", r", node->local(), "] prediction: ", SpeculationDump(prediction), "\n");
- #endif
- node->variableAccessData()->predict(prediction);
- return node;
- }
- // Used in implementing get/set, above, where the operand is a local variable.
- Node* getLocal(unsigned operand)
- {
- Node* node = m_currentBlock->variablesAtTail.local(operand);
- bool isCaptured = m_codeBlock->isCaptured(operand, inlineCallFrame());
-
- // This has two goals: 1) link together variable access datas, and 2)
- // try to avoid creating redundant GetLocals. (1) is required for
- // correctness - no other phase will ensure that block-local variable
- // access data unification is done correctly. (2) is purely opportunistic
- // and is meant as a compile-time optimization only.
-
- VariableAccessData* variable;
-
- if (node) {
- variable = node->variableAccessData();
- variable->mergeIsCaptured(isCaptured);
-
- if (!isCaptured) {
- switch (node->op()) {
- case GetLocal:
- return node;
- case SetLocal:
- return node->child1().node();
- default:
- break;
- }
- }
- } else {
- m_preservedVars.set(operand);
- variable = newVariableAccessData(operand, isCaptured);
- }
-
- node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
- m_currentBlock->variablesAtTail.local(operand) = node;
- return node;
- }
- void setLocal(unsigned operand, Node* value, SetMode setMode = NormalSet)
- {
- bool isCaptured = m_codeBlock->isCaptured(operand, inlineCallFrame());
-
- if (setMode == NormalSet) {
- ArgumentPosition* argumentPosition = findArgumentPositionForLocal(operand);
- if (isCaptured || argumentPosition)
- flushDirect(operand, argumentPosition);
- }
- VariableAccessData* variableAccessData = newVariableAccessData(operand, isCaptured);
- variableAccessData->mergeStructureCheckHoistingFailed(
- m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
- Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
- m_currentBlock->variablesAtTail.local(operand) = node;
- }
- // Used in implementing get/set, above, where the operand is an argument.
- Node* getArgument(unsigned operand)
- {
- unsigned argument = operandToArgument(operand);
- ASSERT(argument < m_numArguments);
-
- Node* node = m_currentBlock->variablesAtTail.argument(argument);
- bool isCaptured = m_codeBlock->isCaptured(operand);
- VariableAccessData* variable;
-
- if (node) {
- variable = node->variableAccessData();
- variable->mergeIsCaptured(isCaptured);
-
- switch (node->op()) {
- case GetLocal:
- return node;
- case SetLocal:
- return node->child1().node();
- default:
- break;
- }
- } else
- variable = newVariableAccessData(operand, isCaptured);
-
- node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
- m_currentBlock->variablesAtTail.argument(argument) = node;
- return node;
- }
- void setArgument(int operand, Node* value, SetMode setMode = NormalSet)
- {
- unsigned argument = operandToArgument(operand);
- ASSERT(argument < m_numArguments);
-
- bool isCaptured = m_codeBlock->isCaptured(operand);
- VariableAccessData* variableAccessData = newVariableAccessData(operand, isCaptured);
- // Always flush arguments, except for 'this'. If 'this' is created by us,
- // then make sure that it's never unboxed.
- if (argument) {
- if (setMode == NormalSet)
- flushDirect(operand);
- } else if (m_codeBlock->specializationKind() == CodeForConstruct)
- variableAccessData->mergeShouldNeverUnbox(true);
-
- variableAccessData->mergeStructureCheckHoistingFailed(
- m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
- Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
- m_currentBlock->variablesAtTail.argument(argument) = node;
- }
-
- ArgumentPosition* findArgumentPositionForArgument(int argument)
- {
- InlineStackEntry* stack = m_inlineStackTop;
- while (stack->m_inlineCallFrame)
- stack = stack->m_caller;
- return stack->m_argumentPositions[argument];
- }
-
- ArgumentPosition* findArgumentPositionForLocal(int operand)
- {
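- // Walk outward through the inline stack. For each inline call frame, skip this
- // frame if the operand lies outside its argument range or refers to 'this';
- // otherwise return the caller's ArgumentPosition for that argument.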
- for (InlineStackEntry* stack = m_inlineStackTop; ; stack = stack->m_caller) {
- InlineCallFrame* inlineCallFrame = stack->m_inlineCallFrame;
- if (!inlineCallFrame)
- break;
- if (operand >= static_cast<int>(inlineCallFrame->stackOffset - JSStack::CallFrameHeaderSize))
- continue;
- if (operand == inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset())
- continue;
- if (operand < static_cast<int>(inlineCallFrame->stackOffset - JSStack::CallFrameHeaderSize - inlineCallFrame->arguments.size()))
- continue;
- int argument = operandToArgument(operand - inlineCallFrame->stackOffset);
- return stack->m_argumentPositions[argument];
- }
- return 0;
- }
-
- ArgumentPosition* findArgumentPosition(int operand)
- {
- if (operandIsArgument(operand))
- return findArgumentPositionForArgument(operandToArgument(operand));
- return findArgumentPositionForLocal(operand);
- }
-
- void flush(int operand)
- {
- flushDirect(m_inlineStackTop->remapOperand(operand));
- }
-
- void flushDirect(int operand)
- {
- flushDirect(operand, findArgumentPosition(operand));
- }
-
- void flushDirect(int operand, ArgumentPosition* argumentPosition)
- {
- bool isCaptured = m_codeBlock->isCaptured(operand, inlineCallFrame());
-
- ASSERT(operand < FirstConstantRegisterIndex);
-
- if (!operandIsArgument(operand))
- m_preservedVars.set(operand);
-
- Node* node = m_currentBlock->variablesAtTail.operand(operand);
-
- VariableAccessData* variable;
-
- if (node) {
- variable = node->variableAccessData();
- variable->mergeIsCaptured(isCaptured);
- } else
- variable = newVariableAccessData(operand, isCaptured);
-
- node = addToGraph(Flush, OpInfo(variable));
- m_currentBlock->variablesAtTail.operand(operand) = node;
- if (argumentPosition)
- argumentPosition->addVariable(variable);
- }
- void flush(InlineStackEntry* inlineStackEntry)
- {
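- // Flush every argument except 'this' (argument 0), along with all captured
- // locals of this code block.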
- int numArguments;
- if (InlineCallFrame* inlineCallFrame = inlineStackEntry->m_inlineCallFrame)
- numArguments = inlineCallFrame->arguments.size();
- else
- numArguments = inlineStackEntry->m_codeBlock->numParameters();
- for (unsigned argument = numArguments; argument-- > 1;)
- flushDirect(inlineStackEntry->remapOperand(argumentToOperand(argument)));
- for (int local = 0; local < inlineStackEntry->m_codeBlock->m_numVars; ++local) {
- if (!inlineStackEntry->m_codeBlock->isCaptured(local))
- continue;
- flushDirect(inlineStackEntry->remapOperand(local));
- }
- }
- void flushAllArgumentsAndCapturedVariablesInInlineStack()
- {
- for (InlineStackEntry* inlineStackEntry = m_inlineStackTop; inlineStackEntry; inlineStackEntry = inlineStackEntry->m_caller)
- flush(inlineStackEntry);
- }
- void flushArgumentsAndCapturedVariables()
- {
- flush(m_inlineStackTop);
- }
- // Get an operand, and perform a ToInt32/ToNumber conversion on it.
- Node* getToInt32(int operand)
- {
- return toInt32(get(operand));
- }
- // Perform an ES5 ToInt32 operation - returns a node of type NodeResultInt32.
- Node* toInt32(Node* node)
- {
- if (node->hasInt32Result())
- return node;
- if (node->op() == UInt32ToNumber)
- return node->child1().node();
- // Check for numeric constants boxed as JSValues.
- if (canFold(node)) {
- JSValue v = valueOfJSConstant(node);
- if (v.isInt32())
- return getJSConstant(node->constantNumber());
- if (v.isNumber())
- return getJSConstantForValue(JSValue(JSC::toInt32(v.asNumber())));
- }
- return addToGraph(ValueToInt32, node);
- }
- // NOTE: Only use this to construct constants that arise from non-speculative
- // constant folding. I.e. creating constants using this if we had constant
- // field inference would be a bad idea, since the bytecode parser's folding
- // doesn't handle liveness preservation.
- Node* getJSConstantForValue(JSValue constantValue)
- {
- unsigned constantIndex = m_codeBlock->addOrFindConstant(constantValue);
- if (constantIndex >= m_constants.size())
- m_constants.append(ConstantRecord());
-
- ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
-
- return getJSConstant(constantIndex);
- }
- Node* getJSConstant(unsigned constant)
- {
- Node* node = m_constants[constant].asJSValue;
- if (node)
- return node;
- Node* result = addToGraph(JSConstant, OpInfo(constant));
- m_constants[constant].asJSValue = result;
- return result;
- }
- Node* getCallee()
- {
- return addToGraph(GetCallee);
- }
- // Helper functions to get/set the this value.
- Node* getThis()
- {
- return get(m_inlineStackTop->m_codeBlock->thisRegister());
- }
- void setThis(Node* value)
- {
- set(m_inlineStackTop->m_codeBlock->thisRegister(), value);
- }
- // Convenience methods for checking nodes for constants.
- bool isJSConstant(Node* node)
- {
- return node->op() == JSConstant;
- }
- bool isInt32Constant(Node* node)
- {
- return isJSConstant(node) && valueOfJSConstant(node).isInt32();
- }
- // Convenience methods for getting constant values.
- JSValue valueOfJSConstant(Node* node)
- {
- ASSERT(isJSConstant(node));
- return m_codeBlock->getConstant(FirstConstantRegisterIndex + node->constantNumber());
- }
- int32_t valueOfInt32Constant(Node* node)
- {
- ASSERT(isInt32Constant(node));
- return valueOfJSConstant(node).asInt32();
- }
-
- // This method returns a JSConstant with the value 'undefined'.
- Node* constantUndefined()
- {
- // Has m_constantUndefined been set up yet?
- if (m_constantUndefined == UINT_MAX) {
- // Search the constant pool for undefined; if we find it, we can just reuse it!
- unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
- for (m_constantUndefined = 0; m_constantUndefined < numberOfConstants; ++m_constantUndefined) {
- JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantUndefined);
- if (testMe.isUndefined())
- return getJSConstant(m_constantUndefined);
- }
- // Add undefined to the CodeBlock's constants, and add a corresponding slot in m_constants.
- ASSERT(m_constants.size() == numberOfConstants);
- m_codeBlock->addConstant(jsUndefined());
- m_constants.append(ConstantRecord());
- ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
- }
- // m_constantUndefined must refer to an entry in the CodeBlock's constant pool that has the value 'undefined'.
- ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantUndefined).isUndefined());
- return getJSConstant(m_constantUndefined);
- }
- // This method returns a JSConstant with the value 'null'.
- Node* constantNull()
- {
- // Has m_constantNull been set up yet?
- if (m_constantNull == UINT_MAX) {
- // Search the constant pool for null; if we find it, we can just reuse it!
- unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
- for (m_constantNull = 0; m_constantNull < numberOfConstants; ++m_constantNull) {
- JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNull);
- if (testMe.isNull())
- return getJSConstant(m_constantNull);
- }
- // Add null to the CodeBlock's constants, and add a corresponding slot in m_constants.
- ASSERT(m_constants.size() == numberOfConstants);
- m_codeBlock->addConstant(jsNull());
- m_constants.append(ConstantRecord());
- ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
- }
- // m_constantNull must refer to an entry in the CodeBlock's constant pool that has the value 'null'.
- ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNull).isNull());
- return getJSConstant(m_constantNull);
- }
- // This method returns a DoubleConstant with the value 1.
- Node* one()
- {
- // Has m_constant1 been set up yet?
- if (m_constant1 == UINT_MAX) {
- // Search the constant pool for the value 1; if we find it, we can just reuse it!
- unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
- for (m_constant1 = 0; m_constant1 < numberOfConstants; ++m_constant1) {
- JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1);
- if (testMe.isInt32() && testMe.asInt32() == 1)
- return getJSConstant(m_constant1);
- }
- // Add the value 1 to the CodeBlock's constants, and add a corresponding slot in m_constants.
- ASSERT(m_constants.size() == numberOfConstants);
- m_codeBlock->addConstant(jsNumber(1));
- m_constants.append(ConstantRecord());
- ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
- }
- // m_constant1 must refer to an entry in the CodeBlock's constant pool that has the integer value 1.
- ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1).isInt32());
- ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1).asInt32() == 1);
- return getJSConstant(m_constant1);
- }
-
- // This method returns a DoubleConstant with the value NaN.
- Node* constantNaN()
- {
- JSValue nan = jsNaN();
-
- // Has m_constantNaN been set up yet?
- if (m_constantNaN == UINT_MAX) {
- // Search the constant pool for the value NaN; if we find it, we can just reuse it!
- unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
- for (m_constantNaN = 0; m_constantNaN < numberOfConstants; ++m_constantNaN) {
- JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN);
- if (JSValue::encode(testMe) == JSValue::encode(nan))
- return getJSConstant(m_constantNaN);
- }
- // Add the value NaN to the CodeBlock's constants, and add a corresponding slot in m_constants.
- ASSERT(m_constants.size() == numberOfConstants);
- m_codeBlock->addConstant(nan);
- m_constants.append(ConstantRecord());
- ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
- }
- // m_constantNaN must refer to an entry in the CodeBlock's constant pool that has the value NaN.
- ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN).isDouble());
- ASSERT(std::isnan(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN).asDouble()));
- return getJSConstant(m_constantNaN);
- }
-
- Node* cellConstant(JSCell* cell)
- {
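- // Cache one WeakJSConstant node per cell, so repeated references to the same
- // cell share a single node.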
- HashMap<JSCell*, Node*>::AddResult result = m_cellConstantNodes.add(cell, 0);
- if (result.isNewEntry)
- result.iterator->value = addToGraph(WeakJSConstant, OpInfo(cell));
-
- return result.iterator->value;
- }
-
- InlineCallFrame* inlineCallFrame()
- {
- return m_inlineStackTop->m_inlineCallFrame;
- }
- CodeOrigin currentCodeOrigin()
- {
- return CodeOrigin(m_currentIndex, inlineCallFrame(), m_currentProfilingIndex - m_currentIndex);
- }
-
- bool canFold(Node* node)
- {
- return node->isStronglyProvedConstantIn(inlineCallFrame());
- }
- // Our codegen for constant strict equality performs a bitwise comparison,
- // so we can only select values that have a consistent bitwise identity.
- bool isConstantForCompareStrictEq(Node* node)
- {
- if (!node->isConstant())
- return false;
- JSValue value = valueOfJSConstant(node);
- return value.isBoolean() || value.isUndefinedOrNull();
- }
-
- Node* addToGraph(NodeType op, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
- {
- Node* result = m_graph.addNode(
- SpecNone, op, currentCodeOrigin(), Edge(child1), Edge(child2), Edge(child3));
- ASSERT(op != Phi);
- m_currentBlock->append(result);
- return result;
- }
- Node* addToGraph(NodeType op, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
- {
- Node* result = m_graph.addNode(
- SpecNone, op, currentCodeOrigin(), child1, child2, child3);
- ASSERT(op != Phi);
- m_currentBlock->append(result);
- return result;
- }
- Node* addToGraph(NodeType op, OpInfo info, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
- {
- Node* result = m_graph.addNode(
- SpecNone, op, currentCodeOrigin(), info, Edge(child1), Edge(child2), Edge(child3));
- ASSERT(op != Phi);
- m_currentBlock->append(result);
- return result;
- }
- Node* addToGraph(NodeType op, OpInfo info1, OpInfo info2, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
- {
- Node* result = m_graph.addNode(
- SpecNone, op, currentCodeOrigin(), info1, info2,
- Edge(child1), Edge(child2), Edge(child3));
- ASSERT(op != Phi);
- m_currentBlock->append(result);
- return result;
- }
-
- Node* addToGraph(Node::VarArgTag, NodeType op, OpInfo info1, OpInfo info2)
- {
- Node* result = m_graph.addNode(
- SpecNone, Node::VarArg, op, currentCodeOrigin(), info1, info2,
- m_graph.m_varArgChildren.size() - m_numPassedVarArgs, m_numPassedVarArgs);
- ASSERT(op != Phi);
- m_currentBlock->append(result);
-
- m_numPassedVarArgs = 0;
-
- return result;
- }
- void addVarArgChild(Node* child)
- {
- m_graph.m_varArgChildren.append(Edge(child));
- m_numPassedVarArgs++;
- }
-
- Node* addCall(Interpreter* interpreter, Instruction* currentInstruction, NodeType op)
- {
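- // Peek at the instruction after the call: if it is op_call_put_result, the
- // call's result is used, so take the value profile prediction from there.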
- Instruction* putInstruction = currentInstruction + OPCODE_LENGTH(op_call);
- SpeculatedType prediction = SpecNone;
- if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result) {
- m_currentProfilingIndex = m_currentIndex + OPCODE_LENGTH(op_call);
- prediction = getPrediction();
- }
-
- addVarArgChild(get(currentInstruction[1].u.operand));
- int argCount = currentInstruction[2].u.operand;
- if (JSStack::CallFrameHeaderSize + (unsigned)argCount > m_parameterSlots)
- m_parameterSlots = JSStack::CallFrameHeaderSize + argCount;
- int registerOffset = currentInstruction[3].u.operand;
- int dummyThisArgument = op == Call ? 0 : 1;
- for (int i = 0 + dummyThisArgument; i < argCount; ++i)
- addVarArgChild(get(registerOffset + argumentToOperand(i)));
- Node* call = addToGraph(Node::VarArg, op, OpInfo(0), OpInfo(prediction));
- if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result)
- set(putInstruction[1].u.operand, call);
- return call;
- }
-
- Node* addStructureTransitionCheck(JSCell* object, Structure* structure)
- {
- // Add a weak JS constant for the object regardless, since the code should
- // be jettisoned if the object ever dies.
- Node* objectNode = cellConstant(object);
-
- if (object->structure() == structure && structure->transitionWatchpointSetIsStillValid()) {
- addToGraph(StructureTransitionWatchpoint, OpInfo(structure), objectNode);
- return objectNode;
- }
-
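- // The watchpoint cannot be used, so plant an explicit structure check instead.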
- addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structure)), objectNode);
-
- return objectNode;
- }
-
- Node* addStructureTransitionCheck(JSCell* object)
- {
- return addStructureTransitionCheck(object, object->structure());
- }
-
- SpeculatedType getPredictionWithoutOSRExit(unsigned bytecodeIndex)
- {
- return m_inlineStackTop->m_profiledBlock->valueProfilePredictionForBytecodeOffset(bytecodeIndex);
- }
- SpeculatedType getPrediction(unsigned bytecodeIndex)
- {
- SpeculatedType prediction = getPredictionWithoutOSRExit(bytecodeIndex);
-
- if (prediction == SpecNone) {
- // We have no information about what values this node generates. Give up
- // on executing this code, since we're likely to do more damage than good.
- addToGraph(ForceOSRExit);
- }
-
- return prediction;
- }
-
- SpeculatedType getPredictionWithoutOSRExit()
- {
- return getPredictionWithoutOSRExit(m_currentProfilingIndex);
- }
-
- SpeculatedType getPrediction()
- {
- return getPrediction(m_currentProfilingIndex);
- }
-
- ArrayMode getArrayMode(ArrayProfile* profile, Array::Action action)
- {
- profile->computeUpdatedPrediction(m_inlineStackTop->m_codeBlock);
- return ArrayMode::fromObserved(profile, action, false);
- }
-
- ArrayMode getArrayMode(ArrayProfile* profile)
- {
- return getArrayMode(profile, Array::Read);
- }
-
- ArrayMode getArrayModeAndEmitChecks(ArrayProfile* profile, Array::Action action, Node* base)
- {
- profile->computeUpdatedPrediction(m_inlineStackTop->m_codeBlock);
-
- #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- if (m_inlineStackTop->m_profiledBlock->numberOfRareCaseProfiles())
- dataLogF("Slow case profile for bc#%u: %u\n", m_currentIndex, m_inlineStackTop->m_profiledBlock->rareCaseProfileForBytecodeOffset(m_currentIndex)->m_counter);
- dataLogF("Array profile for bc#%u: %p%s%s, %u\n", m_currentIndex, profile->expectedStructure(), profile->structureIsPolymorphic() ? " (polymorphic)" : "", profile->mayInterceptIndexedAccesses() ? " (may intercept)" : "", profile->observedArrayModes());
- #endif
-
- bool makeSafe =
- m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
- || profile->outOfBounds();
-
- ArrayMode result = ArrayMode::fromObserved(profile, action, makeSafe);
-
- if (profile->hasDefiniteStructure()
- && result.benefitsFromStructureCheck()
- && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache))
- addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(profile->expectedStructure())), base);
-
- return result;
- }
-
- Node* makeSafe(Node* node)
- {
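- // Annotate an arithmetic node with overflow/negative-zero flags when slow-case
- // counters or prior OSR exits suggest that the fast path is not safe.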
- bool likelyToTakeSlowCase;
- if (!isX86() && node->op() == ArithMod)
- likelyToTakeSlowCase = false;
- else
- likelyToTakeSlowCase = m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex);
-
- if (!likelyToTakeSlowCase
- && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)
- && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
- return node;
-
- switch (node->op()) {
- case UInt32ToNumber:
- case ArithAdd:
- case ArithSub:
- case ArithNegate:
- case ValueAdd:
- case ArithMod: // for ArithMod "MayOverflow" means we tried to divide by zero, or we saw a double.
- node->mergeFlags(NodeMayOverflow);
- break;
-
- case ArithMul:
- if (m_inlineStackTop->m_profiledBlock->likelyToTakeDeepestSlowCase(m_currentIndex)
- || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)) {
- #if DFG_ENABLE(DEBUG_VERBOSE)
- dataLogF("Making ArithMul @%u take deepest slow case.\n", node->index());
- #endif
- node->mergeFlags(NodeMayOverflow | NodeMayNegZero);
- } else if (m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
- || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero)) {
- #if DFG_ENABLE(DEBUG_VERBOSE)
- dataLogF("Making ArithMul @%u take faster slow case.\n", node->index());
- #endif
- node->mergeFlags(NodeMayNegZero);
- }
- break;
-
- default:
- RELEASE_ASSERT_NOT_REACHED();
- break;
- }
-
- return node;
- }
-
- Node* makeDivSafe(Node* node)
- {
- ASSERT(node->op() == ArithDiv);
-
- // The main slow case counter for op_div in the old JIT counts only when
- // the operands are not numbers. We don't care about that since we already
- // have speculations in place that take care of that separately. We only
- // care about when the outcome of the division is not an integer, which
- // is what the special fast case counter tells us.
-
- if (!m_inlineStackTop->m_profiledBlock->couldTakeSpecialFastCase(m_currentIndex)
- && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)
- && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
- return node;
-
- #if DFG_ENABLE(DEBUG_VERBOSE)
- dataLogF("Making %s @%u safe at bc#%u because special fast-case counter is at %u and exit profiles say %d, %d\n", Graph::opName(node->op()), node->index(), m_currentIndex, m_inlineStackTop->m_profiledBlock->specialFastCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow), m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero));
- #endif
-
- // FIXME: It might be possible to make this more granular. The DFG certainly can
- // distinguish between negative zero and overflow in its exit profiles.
- node->mergeFlags(NodeMayOverflow | NodeMayNegZero);
-
- return node;
- }
-
- bool structureChainIsStillValid(bool direct, Structure* previousStructure, StructureChain* chain)
- {
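- // A direct put does not consult the prototype chain, so the chain is trivially
- // valid. Otherwise, verify that each structure recorded in the chain still
- // matches the current structure of the corresponding prototype.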
- if (direct)
- return true;
-
- if (!previousStructure->storedPrototype().isNull() && previousStructure->storedPrototype().asCell()->structure() != chain->head()->get())
- return false;
-
- for (WriteBarrier<Structure>* it = chain->head(); *it; ++it) {
- if (!(*it)->storedPrototype().isNull() && (*it)->storedPrototype().asCell()->structure() != it[1].get())
- return false;
- }
-
- return true;
- }
-
- void buildOperandMapsIfNecessary();
-
- VM* m_vm;
- CodeBlock* m_codeBlock;
- CodeBlock* m_profiledBlock;
- Graph& m_graph;
- // The current block being generated.
- BasicBlock* m_currentBlock;
- // The bytecode index of the current instruction being generated.
- unsigned m_currentIndex;
- // The bytecode index of the value profile of the current instruction being generated.
- unsigned m_currentProfilingIndex;
- // We use these values during code generation, and to avoid the need for
- // special handling we make sure they are available as constants in the
- // CodeBlock's constant pool. These variables are initialized to
- // UINT_MAX, and lazily updated to hold an index into the CodeBlock's
- // constant pool, as necessary.
- unsigned m_constantUndefined;
- unsigned m_constantNull;
- unsigned m_constantNaN;
- unsigned m_constant1;
- HashMap<JSCell*, unsigned> m_cellConstants;
- HashMap<JSCell*, Node*> m_cellConstantNodes;
- // A constant in the constant pool may be represented by more than one
- // node in the graph, depending on the context in which it is being used.
- struct ConstantRecord {
- ConstantRecord()
- : asInt32(0)
- , asNumeric(0)
- , asJSValue(0)
- {
- }
- Node* asInt32;
- Node* asNumeric;
- Node* asJSValue;
- };
- // Track the index of the node whose result is the current value for every
- // register value in the bytecode - argument, local, and temporary.
- Vector<ConstantRecord, 16> m_constants;
- // The number of arguments passed to the function.
- unsigned m_numArguments;
- // The number of locals (vars + temporaries) used in the function.
- unsigned m_numLocals;
- // The set of registers we need to preserve across BasicBlock boundaries;
- // typically equal to the set of vars, but we expand this to cover all
- // temporaries that persist across blocks (due to ?:, &&, ||, etc.).
- BitVector_shared m_preservedVars;
- // The number of slots (in units of sizeof(Register)) that we need to
- // preallocate for calls emanating from this frame. This includes the
- // size of the CallFrame, but only if this is not a leaf function. (I.e.
- // this is 0 if and only if this function is a leaf.)
- unsigned m_parameterSlots;
- // The number of var args passed to the next var arg node.
- unsigned m_numPassedVarArgs;
- HashMap<ConstantBufferKey, unsigned> m_constantBufferCache;
-
- struct InlineStackEntry {
- ByteCodeParser* m_byteCodeParser;
-
- CodeBlock* m_codeBlock;
- CodeBlock* m_profiledBlock;
- InlineCallFrame* m_inlineCallFrame;
-
- ScriptExecutable* executable() { return m_codeBlock->ownerExecutable(); }
-
- QueryableExitProfile m_exitProfile;
-
- // Remapping of identifier and constant numbers from the code block being
- // inlined (inline callee) to the code block that we're inlining into
- // (the machine code block, which is the transitive, though not necessarily
- // direct, caller).
- Vector_shared<unsigned> m_identifierRemap;
- Vector<unsigned> m_constantRemap;
- Vector<unsigned> m_constantBufferRemap;
-
- // Blocks introduced by this code block, which need successor linking.
- // May include up to one basic block that includes the continuation after
- // the callsite in the caller. These must be appended in the order that they
- // are created, but their bytecodeBegin values need not be in order as they
- // are ignored.
- Vector<UnlinkedBlock> m_unlinkedBlocks;
-
- // Potential block linking targets. Must be sorted by bytecodeBegin, and
- // cannot have two blocks that have the same bytecodeBegin. For this very
- // reason, this is not equivalent to m_unlinkedBlocks.
- Vector<BlockIndex> m_blockLinkingTargets;
-
- // If the callsite's basic block was split into two, then this will be
- // the head of the callsite block. It needs its successors linked to the
- // m_unlinkedBlocks, but not the other way around: there's no way for
- // any blocks in m_unlinkedBlocks to jump back into this block.
- BlockIndex m_callsiteBlockHead;
-
- // Does the callsite block head need linking? This is typically true
- // but will be false for the machine code block's inline stack entry
- // (since that one is not inlined) and for cases where an inline callee
- // did the linking for us.
- bool m_callsiteBlockHeadNeedsLinking;
-
- VirtualRegister m_returnValue;
-
- // Speculations about variable types collected from the profiled code block,
- // which are based on OSR exit profiles that past DFG compilations of this
- // code block had gathered.
- LazyOperandValueProfileParser m_lazyOperands;
-
- // Did we see any returns? We need to handle the (uncommon but necessary)
- // case where a procedure that does not return was inlined.
- bool m_didReturn;
-
- // Did we have any early returns?
- bool m_didEarlyReturn;
-
- // Pointers to the argument position trackers for this slice of code.
- Vector<ArgumentPosition*> m_argumentPositions;
-
- InlineStackEntry* m_caller;
-
- InlineStackEntry(
- ByteCodeParser*,
- CodeBlock*,
- CodeBlock* profiledBlock,
- BlockIndex callsiteBlockHead,
- JSFunction* callee, // Null if this is a closure call.
- VirtualRegister returnValueVR,
- VirtualRegister inlineCallFrameStart,
- int argumentCountIncludingThis,
- CodeSpecializationKind);
-
- ~InlineStackEntry()
- {
- m_byteCodeParser->m_inlineStackTop = m_caller;
- }
-
- int remapOperand(int operand) const
- {
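- // Rebase an operand from the inlined code block's numbering into the machine
- // code block's numbering: constants go through m_constantRemap, while locals
- // and arguments are shifted by the inline call frame's stack offset.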
- if (!m_inlineCallFrame)
- return operand;
-
- if (operand >= FirstConstantRegisterIndex) {
- int result = m_constantRemap[operand - FirstConstantRegisterIndex];
- ASSERT(result >= FirstConstantRegisterIndex);
- return result;
- }
- ASSERT(operand != JSStack::Callee);
- return operand + m_inlineCallFrame->stackOffset;
- }
- };
-
- InlineStackEntry* m_inlineStackTop;
- // Have we built operand maps? We initialize them lazily, and only when doing
- // inlining.
- bool m_haveBuiltOperandMaps;
- // Mapping between identifier names and numbers.
- IdentifierMap m_identifierMap;
- // Mapping between values and constant numbers.
- JSValueMap m_jsValueMap;
- // Index of the empty value, or UINT_MAX if there is no mapping. This is a horrible
- // work-around for the fact that JSValueMap can't handle "empty" values.
- unsigned m_emptyJSValueIndex;
-
- Instruction* m_currentInstruction;
- };
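- // NEXT_OPCODE advances past the current instruction and continues the parse
- // loop; LAST_OPCODE does the same bookkeeping but returns from parseBlock,
- // indicating whether parsing should continue.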
- #define NEXT_OPCODE(name) \
- m_currentIndex += OPCODE_LENGTH(name); \
- continue
- #define LAST_OPCODE(name) \
- m_currentIndex += OPCODE_LENGTH(name); \
- return shouldContinueParsing
- void ByteCodeParser::handleCall(Interpreter* interpreter, Instruction* currentInstruction, NodeType op, CodeSpecializationKind kind)
- {
- ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct));
-
- Node* callTarget = get(currentInstruction[1].u.operand);
-
- CallLinkStatus callLinkStatus;
- if (m_graph.isConstant(callTarget))
- callLinkStatus = CallLinkStatus(m_graph.valueOfJSConstant(callTarget)).setIsProved(true);
- else {
- callLinkStatus = CallLinkStatus::computeFor(m_inlineStackTop->m_profiledBlock, m_currentIndex);
- callLinkStatus.setHasBadFunctionExitSite(m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadFunction));
- callLinkStatus.setHasBadCacheExitSite(m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
- callLinkStatus.setHasBadExecutableExitSite(m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadExecutable));
- }
-
- #if DFG_ENABLE(DEBUG_VERBOSE)
- dataLog("For call at bc#", m_currentIndex, ": ", callLinkStatus, "\n");
- #endif
-
- if (!callLinkStatus.canOptimize()) {
- // Oddly, this conflates calls that haven't executed with calls that behaved sufficiently polymorphically
- // that we cannot optimize them.
-
- addCall(interpreter, currentInstruction, op);
- return;
- }
-
- int argumentCountIncludingThis = currentInstruction[2].u.operand;
- int registerOffset = currentInstruction[3].u.operand;
- // Do we have a result?
- bool usesResult = false;
- int resultOperand = 0; // make compiler happy
- unsigned nextOffset = m_currentIndex + OPCODE_LENGTH(op_call);
- Instruction* putInstruction = currentInstruction + OPCODE_LENGTH(op_call);
- SpeculatedType prediction = SpecNone;
- if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result) {
- resultOperand = putInstruction[1].u.operand;
- usesResult = true;
- m_currentProfilingIndex = nextOffset;
- prediction = getPrediction();
- nextOffset += OPCODE_LENGTH(op_call_put_result);
- }
- if (InternalFunction* function = callLinkStatus.internalFunction()) {
- if (handleConstantInternalFunction(usesResult, resultOperand, function, registerOffset, argumentCountIncludingThis, prediction, kind)) {
- // This phantoming has to be *after* the code for the intrinsic, to signify that
- // the inputs must be kept alive whatever exits the intrinsic may do.
- addToGraph(Phantom, callTarget);
- emitArgumentPhantoms(registerOffset, argumentCountIncludingThis, kind);
- return;
- }
-
- // Can only handle this using the generic call handler.
- addCall(interpreter, currentInstruction, op);
- return;
- }
-
- Intrinsic intrinsic = callLinkStatus.intrinsicFor(kind);
- if (intrinsic != NoIntrinsic) {
- emitFunctionChecks(callLinkStatus, callTarget, registerOffset, kind);
-
- if (handleIntrinsic(usesResult, resultOperand, intrinsic, registerOffset, argumentCountIncludingThis, prediction)) {
- // This phantoming has to be *after* the code for the intrinsic, to signify that
- // the inputs must be kept alive whatever exits the intrinsic may do.
- addToGraph(Phantom, callTarget);
- emitArgumentPhantoms(registerOffset, argumentCountIncludingThis, kind);
- #if ENABLE(DETACHED_JIT)
- #pragma message "[SECURE JSCORE] profiling not supported"
- #else
- if (m_graph.m_compilation)
- m_graph.m_compilation->noticeInlinedCall();
- #endif
- return;
- }
- } else if (handleInlining(usesResult, callTarget, resultOperand, callLinkStatus, registerOffset, argumentCountIncludingThis, nextOffset, kind)) {
- #if ENABLE(DETACHED_JIT)
- #pragma message "[SECURE JSCORE] profiling not supported"
- #else
- if (m_graph.m_compilation)
- m_graph.m_compilation->noticeInlinedCall();
- #endif
- return;
- }
-
- addCall(interpreter, currentInstruction, op);
- }
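- // Editor's note: a distilled view of the ladder above (hypothetical helper
- // names, not the real API):
- //
- //   if (!status.canOptimize())               return addCall(...);  // generic call
- //   if (status.internalFunction())           return constantInternalFunctionOrGenericCall(...);
- //   if (intrinsic != NoIntrinsic && handled) return;               // inlined intrinsic
- //   if (handleInlining(...))                 return;               // inlined JS function
- //   addCall(...);                                                  // generic fallback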
- void ByteCodeParser::emitFunctionChecks(const CallLinkStatus& callLinkStatus, Node* callTarget, int registerOffset, CodeSpecializationKind kind)
- {
- Node* thisArgument;
- if (kind == CodeForCall)
- thisArgument = get(registerOffset + argumentToOperand(0));
- else
- thisArgument = 0;
- if (callLinkStatus.isProved()) {
- addToGraph(Phantom, callTarget, thisArgument);
- return;
- }
-
- ASSERT(callLinkStatus.canOptimize());
-
- if (JSFunction* function = callLinkStatus.function())
- addToGraph(CheckFunction, OpInfo(function), callTarget, thisArgument);
- else {
- ASSERT(callLinkStatus.structure());
- ASSERT(callLinkStatus.executable());
-
- addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(callLinkStatus.structure())), callTarget);
- addToGraph(CheckExecutable, OpInfo(callLinkStatus.executable()), callTarget, thisArgument);
- }
- }
- void ByteCodeParser::emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind kind)
- {
- for (int i = kind == CodeForCall ? 0 : 1; i < argumentCountIncludingThis; ++i)
- addToGraph(Phantom, get(registerOffset + argumentToOperand(i)));
- }
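- // Editor's note: the loop above starts at argument 1 for CodeForConstruct
- // because the |this| slot is not a live input at a construct site (the callee
- // allocates it); for CodeForCall it starts at 0 and keeps |this| alive too.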
- bool ByteCodeParser::handleInlining(bool usesResult, Node* callTargetNode, int resultOperand, const CallLinkStatus& callLinkStatus, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, CodeSpecializationKind kind)
- {
- // First, the really simple checks: do we have an actual JS function?
- if (!callLinkStatus.executable())
- return false;
- if (callLinkStatus.executable()->isHostFunction())
- return false;
-
- FunctionExecutable* executable = jsCast<FunctionExecutable*>(callLinkStatus.executable());
-
- // Does the number of arguments we're passing match the arity of the target? We currently
- // inline only if the number of arguments passed is greater than or equal to the number
- // of arguments expected.
- if (static_cast<int>(executable->parameterCount()) + 1 > argumentCountIncludingThis)
- return false;
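- // Editor's note (worked example): for "function f(a, b)" invoked as f(1, 2, 3),
- // argumentCountIncludingThis is 4 and parameterCount() is 2, so 2 + 1 <= 4 and
- // the arity check above passes; invoking f(1) gives 2 < 3 and blocks inlining.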
-
- // Have we exceeded inline stack depth, or are we trying to inline a recursive call?
- // If either of these are detected, then don't inline.
- unsigned depth = 0;
- for (InlineStackEntry* entry = m_inlineStackTop; entry; entry = entry->m_caller) {
- ++depth;
- if (depth >= Options::maximumInliningDepth())
- return false; // Depth exceeded.
-
- if (entry->executable() == executable)
- return false; // Recursion detected.
- }
-
- // Do we have a code block, and does the code block's size match the heuristics/requirements for
- // being an inline candidate? We might not have a code block if code was thrown away or if we
- // simply hadn't actually made this call yet. We could still theoretically attempt to inline it
- // if we had a static proof of what was being called; this might happen for example if you call a
- // global function, where watchpointing gives us static information. Overall, it's a rare case
- // because we expect that any hot callees would have already been compiled.
- CodeBlock* codeBlock = executable->baselineCodeBlockFor(kind);
- if (!codeBlock)
- return false;
- if (!canInlineFunctionFor(codeBlock, kind, callLinkStatus.isClosureCall()))
- return false;
-
- #if DFG_ENABLE(DEBUG_VERBOSE)
- dataLogF("Inlining executable %p.\n", executable);
- #endif
-
- // Now we know without a doubt that we are committed to inlining. So begin the process
- // by checking the callee (if necessary) and making sure that arguments and the callee
- // are flushed.
- emitFunctionChecks(callLinkStatus, callTargetNode, registerOffset, kind);
-
- // FIXME: Don't flush constants!
-
- int inlineCallFrameStart = m_inlineStackTop->remapOperand(registerOffset) - JSStack::CallFrameHeaderSize;
-
- // Make sure that the area used by the call frame is reserved.
- for (int arg = inlineCallFrameStart + JSStack::CallFrameHeaderSize + codeBlock->m_numVars; arg-- > inlineCallFrameStart;)
- m_preservedVars.set(arg);
-
- // Make sure that we have enough locals.
- unsigned newNumLocals = inlineCallFrameStart + JSStack::CallFrameHeaderSize + codeBlock->m_numCalleeRegisters;
- if (newNumLocals > m_numLocals) {
- m_numLocals = newNumLocals;
- for (size_t i = 0; i < m_graph.m_blocks.size(); ++i)
- m_graph.m_blocks[i]->ensureLocals(newNumLocals);
- }
-
- size_t argumentPositionStart = m_graph.m_argumentPositions.size();
- InlineStackEntry inlineStackEntry(
- this, codeBlock, codeBlock, m_graph.m_blocks.size() - 1,
- callLinkStatus.function(), (VirtualRegister)m_inlineStackTop->remapOperand(
- usesResult ? resultOperand : InvalidVirtualRegister),
- (VirtualRegister)inlineCallFrameStart, argumentCountIncludingThis, kind);
-
- // This is where the actual inlining really happens.
- unsigned oldIndex = m_currentIndex;
- unsigned oldProfilingIndex = m_currentProfilingIndex;
- m_currentIndex = 0;
- m_currentProfilingIndex = 0;
- addToGraph(InlineStart, OpInfo(argumentPositionStart));
- if (callLinkStatus.isClosureCall()) {
- addToGraph(SetCallee, callTargetNode);
- addToGraph(SetMyScope, addToGraph(GetScope, callTargetNode));
- }
-
- parseCodeBlock();
-
- m_currentIndex = oldIndex;
- m_currentProfilingIndex = oldProfilingIndex;
-
- // If the inlined code created some new basic blocks, then we have linking to do.
- if (inlineStackEntry.m_callsiteBlockHead != m_graph.m_blocks.size() - 1) {
-
- ASSERT(!inlineStackEntry.m_unlinkedBlocks.isEmpty());
- if (inlineStackEntry.m_callsiteBlockHeadNeedsLinking)
- linkBlock(m_graph.m_blocks[inlineStackEntry.m_callsiteBlockHead].get(), inlineStackEntry.m_blockLinkingTargets);
- else
- ASSERT(m_graph.m_blocks[inlineStackEntry.m_callsiteBlockHead]->isLinked);
-
- // It's possible that the callsite block head is not owned by the caller.
- if (!inlineStackEntry.m_caller->m_unlinkedBlocks.isEmpty()) {
- // It's definitely owned by the caller, because the caller created new blocks.
- // Assert that this all adds up.
- ASSERT(inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_blockIndex == inlineStackEntry.m_callsiteBlockHead);
- ASSERT(inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_needsNormalLinking);
- inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_needsNormalLinking = false;
- } else {
- // It's definitely not owned by the caller. Tell the caller that it does not
- // need to link its callsite block head, because we already did that here.
- ASSERT(inlineStackEntry.m_caller->m_callsiteBlockHeadNeedsLinking);
- ASSERT(inlineStackEntry.m_caller->m_callsiteBlockHead == inlineStackEntry.m_callsiteBlockHead);
- inlineStackEntry.m_caller->m_callsiteBlockHeadNeedsLinking = false;
- }
-
- linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
- } else
- ASSERT(inlineStackEntry.m_unlinkedBlocks.isEmpty());
-
- BasicBlock* lastBlock = m_graph.m_blocks.last().get();
- // If there was a return, but no early returns, then we're done. We allow parsing of
- // the caller to continue in whatever basic block we're in right now.
- if (!inlineStackEntry.m_didEarlyReturn && inlineStackEntry.m_didReturn) {
- ASSERT(lastBlock->isEmpty() || !lastBlock->last()->isTerminal());
-
- // If we created new blocks then the last block needs linking, but in the
- // caller. It doesn't need to be linked to, but it needs outgoing links.
- if (!inlineStackEntry.m_unlinkedBlocks.isEmpty()) {
- #if DFG_ENABLE(DEBUG_VERBOSE)
- dataLogF("Reascribing bytecode index of block %p from bc#%u to bc#%u (inline return case).\n", lastBlock, lastBlock->bytecodeBegin, m_currentIndex);
- #endif
- // For debugging purposes, set the bytecodeBegin. Note that this doesn't matter
- // for release builds because this block will never serve as a potential target
- // in the linker's binary search.
- lastBlock->bytecodeBegin = m_currentIndex;
- m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.m_blocks.size() - 1));
- }
-
- m_currentBlock = m_graph.m_blocks.last().get();
- #if DFG_ENABLE(DEBUG_VERBOSE)
- dataLogF("Done inlining executable %p, continuing code generation at epilogue.\n", executable);
- #endif
- return true;
- }
-
- // If we get to this point then all blocks must end in some sort of terminal.
- ASSERT(lastBlock->last()->isTerminal());
-
- // Link the early returns to the basic block we're about to create.
- for (size_t i = 0; i < inlineStackEntry.m_unlinkedBlocks.size(); ++i) {
- if (!inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking)
- continue;
- BasicBlock* block = m_graph.m_blocks[inlineStackEntry.m_unlinkedBlocks[i].m_blockIndex].get();
- ASSERT(!block->isLinked);
- Node* node = block->last();
- ASSERT(node->op() == Jump);
- ASSERT(node->takenBlockIndex() == NoBlock);
- node->setTakenBlockIndex(m_graph.m_blocks.size());
- inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking = false;
- #if !ASSERT_DISABLED
- block->isLinked = true;
- #endif
- }
-
- // Need to create a new basic block for the continuation at the caller.
- OwnPtr<BasicBlock> block = adoptPtr(new BasicBlock(nextOffset, m_numArguments, m_numLocals));
- #if DFG_ENABLE(DEBUG_VERBOSE)
- dataLogF("Creating inline epilogue basic block %p, #%zu for %p bc#%u at inline depth %u.\n", block.get(), m_graph.m_blocks.size(), m_inlineStackTop->executable(), m_currentIndex, CodeOrigin::inlineDepthForCallFrame(inlineCallFrame()));
- #endif
- m_currentBlock = block.get();
- ASSERT(m_inlineStackTop->m_caller->m_blockLinkingTargets.isEmpty() || m_graph.m_blocks[m_inlineStackTop->m_caller->m_blockLinkingTargets.last()]->bytecodeBegin < nextOffset);
- m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.m_blocks.size()));
- m_inlineStackTop->m_caller->m_blockLinkingTargets.append(m_graph.m_blocks.size());
- m_graph.m_blocks.append(block.release());
- prepareToParseBlock();
-
- // At this point we return and continue to generate code for the caller, but
- // in the new basic block.
- #if DFG_ENABLE(DEBUG_VERBOSE)
- dataLogF("Done inlining executable %p, continuing code generation in new block.\n", executable);
- #endif
- return true;
- }
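- // Editor's note: handleInlining succeeds through one of two exits above:
- // either the inlinee ended with a plain return and the caller keeps emitting
- // into the inlinee's last block, or only early returns occurred and they are
- // all linked to a freshly created continuation block. In both cases
- // m_currentBlock is left where the caller's next bytecode should land.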
- void ByteCodeParser::setIntrinsicResult(bool usesResult, int resultOperand, Node* node)
- {
- if (!usesResult)
- return;
- set(resultOperand, node);
- }
- bool ByteCodeParser::handleMinMax(bool usesResult, int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis)
- {
- if (argumentCountIncludingThis == 1) { // Math.min() / Math.max() with no arguments
- // Per the spec, Math.min() evaluates to +Infinity and Math.max() to -Infinity,
- // not NaN, so fold to the appropriate identity element for this op.
- double result = op == ArithMax ? -std::numeric_limits<double>::infinity() : std::numeric_limits<double>::infinity();
- setIntrinsicResult(usesResult, resultOperand, getJSConstantForValue(jsNumber(result)));
- return true;
- }
-
- if (argumentCountIncludingThis == 2) { // Math.min(x)
- Node* result = get(registerOffset + argumentToOperand(1));
- addToGraph(Phantom, Edge(result, NumberUse));
- setIntrinsicResult(usesResult, resultOperand, result);
- return true;
- }
-
- if (argumentCountIncludingThis == 3) { // Math.min(x, y)
- setIntrinsicResult(usesResult, resultOperand, addToGraph(op, get(registerOffset + argumentToOperand(1)), get(registerOffset + argumentToOperand(2))));
- return true;
- }
-
- // Don't handle >=3 arguments for now.
- return false;
- }
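- // Editor's note: summarizing the specialization above (argument counts include
- // |this|): no arguments folds to the +/-Infinity identity constant; one
- // argument returns the operand with a Phantom(NumberUse) edge standing in for
- // the ToNumber the real call would perform; two arguments become a single
- // ArithMin/ArithMax node; three or more fall back to the generic call.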
- // FIXME: We dead-code-eliminate unused Math intrinsics, but that's invalid because
- // they need to perform the ToNumber conversion, which can have side-effects.
- bool ByteCodeParser::handleIntrinsic(bool usesResult, int resultOperand, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction)
- {
- switch (intrinsic) {
- case AbsIntrinsic: {
- if (argumentCountIncludingThis == 1) { // Math.abs()
- setIntrinsicResult(usesResult, resultOperand, constantNaN());
- return true;
- }
- if (!MacroAssembler::supportsFloatingPointAbs())
- return false;
- Node* node = addToGraph(ArithAbs, get(registerOffset + argumentToOperand(1)));
- if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
- node->mergeFlags(NodeMayOverflow);
- setIntrinsicResult(usesResult, resultOperand, node);
- return true;
- }
- case MinIntrinsic:
- return handleMinMax(usesResult, resultOperand, ArithMin, registerOffset, argumentCountIncludingThis);
-
- case MaxIntrinsic:
- return handleMinMax(usesResult, resultOperand, ArithMax, registerOffset, argumentCountIncludingThis);
-
- case SqrtIntrinsic: {
- if (argumentCountIncludingThis == 1) { // Math.sqrt()
- setIntrinsicResult(usesResult, resultOperand, constantNaN());
- return true;
- }
-
- if (!MacroAssembler::supportsFloatingPointSqrt())
- return false;
-
- setIntrinsicResult(usesResult, resultOperand, addToGraph(ArithSqrt, get(registerOffset + argumentToOperand(1))));
- return true;
- }
-
- case ArrayPushIntrinsic: {
- if (argumentCountIncludingThis != 2)
- return false;
-
- ArrayMode arrayMode = getArrayMode(m_currentInstruction[5].u.arrayProfile);
- if (!arrayMode.isJSArray())
- return false;
- switch (arrayMode.type()) {
- case Array::Undecided:
- case Array::Int32:
- case Array::Double:
- case Array::Contiguous:
- case Array::ArrayStorage: {
- Node* arrayPush = addToGraph(ArrayPush, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(registerOffset + argumentToOperand(0)), get(registerOffset + argumentToOperand(1)));
- if (usesResult)
- set(resultOperand, arrayPush);
-
- return true;
- }
-
- default:
- return false;
- }
- }
-
- case ArrayPopIntrinsic: {
- if (argumentCountIncludingThis != 1)
- return false;
-
- ArrayMode arrayMode = getArrayMode(m_currentInstruction[5].u.arrayProfile);
- if (!arrayMode.isJSArray())
- return false;
- switch (arrayMode.type()) {
- case Array::Int32:
- case Array::Double:
- case Array::Contiguous:
- case Array::ArrayStorage: {
- Node* arrayPop = addToGraph(ArrayPop, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(registerOffset + argumentToOperand(0)));
- if (usesResult)
- set(resultOperand, arrayPop);
- return true;
- }
-
- default:
- return false;
- }
- }
- case CharCodeAtIntrinsic: {
- if (argumentCountIncludingThis != 2)
- return false;
- int thisOperand = registerOffset + argumentToOperand(0);
- int indexOperand = registerOffset + argumentToOperand(1);
- Node* charCode = addToGraph(StringCharCodeAt, OpInfo(ArrayMode(Array::String).asWord()), get(thisOperand), getToInt32(indexOperand));
- if (usesResult)
- set(resultOperand, charCode);
- return true;
- }
- case CharAtIntrinsic: {
- if (argumentCountIncludingThis != 2)
- return false;
- int thisOperand = registerOffset + argumentToOperand(0);
- int indexOperand = registerOffset + argumentToOperand(1);
- Node* charAt = addToGraph(StringCharAt, OpInfo(ArrayMode(Array::String).asWord()), get(thisOperand), getToInt32(indexOperand));
- if (usesResult)
- set(resultOperand, charAt);
- return true;
- }
- case FromCharCodeIntrinsic: {
- if (argumentCountIncludingThis != 2)
- return false;
- int indexOperand = registerOffset + argumentToOperand(1);
- Node* fromCharCode = addToGraph(StringFromCharCode, getToInt32(indexOperand));
- if (usesResult)
- set(resultOperand, fromCharCode);
- return true;
- }
- case RegExpExecIntrinsic: {
- if (argumentCountIncludingThis != 2)
- return false;
-
- Node* regExpExec = addToGraph(RegExpExec, OpInfo(0), OpInfo(prediction), get(registerOffset + argumentToOperand(0)), get(registerOffset + argumentToOperand(1)));
- if (usesResult)
- set(resultOperand, regExpExec);
-
- return true;
- }
-
- case RegExpTestIntrinsic: {
- if (argumentCountIncludingThis != 2)
- return false;
-
- Node* regExpTest = addToGraph(RegExpTest, OpInfo(0), OpInfo(prediction), get(registerOffset + argumentToOperand(0)), get(registerOffset + argumentToOperand(1)));
- if (usesResult)
- set(resultOperand, regExpTest);
-
- return true;
- }
- case IMulIntrinsic: {
- if (argumentCountIncludingThis != 3)
- return false;
- int leftOperand = registerOffset + argumentToOperand(1);
- int rightOperand = registerOffset + argumentToOperand(2);
- Node* left = getToInt32(leftOperand);
- Node* right = getToInt32(rightOperand);
- setIntrinsicResult(usesResult, resultOperand, addToGraph(ArithIMul, left, right));
- return true;
- }
-
- default:
- return false;
- }
- }
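- // Editor's note: ArithIMul above carries Math.imul semantics, i.e. an int32
- // wraparound multiply. A self-contained C++ sketch of that semantic:
- //
- //   static int32_t imulLike(int32_t a, int32_t b)
- //   {
- //       // Unsigned multiplication wraps mod 2^32; converting back keeps the low 32 bits.
- //       return static_cast<int32_t>(static_cast<uint32_t>(a) * static_cast<uint32_t>(b));
- //   }
- //
- //   // imulLike(0x7fffffff, 2) == -2, matching Math.imul(0x7fffffff, 2).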
- bool ByteCodeParser::handleConstantInternalFunction(
- bool usesResult, int resultOperand, InternalFunction* function, int registerOffset,
- int argumentCountIncludingThis, SpeculatedType prediction, CodeSpecializationKind kind)
- {
- // If we ever find that we have a lot of internal functions that we specialize for,
- // then we should probably have some sort of hashtable dispatch, or maybe even
- // dispatch straight through the MethodTable of the InternalFunction. But for now,
- // it seems that this case is hit infrequently enough, and the number of functions
- // we know about is small enough, that having just a linear cascade of if statements
- // is good enough.
-
- UNUSED_PARAM(prediction); // Remove this once we do more things.
-
- if (function->classInfo() == &ArrayConstructor::s_info) {
- if (argumentCountIncludingThis == 2) {
- setIntrinsicResult(
- usesResult, resultOperand,
- addToGraph(NewArrayWithSize, OpInfo(ArrayWithUndecided), get(registerOffset + argumentToOperand(1))));
- return true;
- }
-
- for (int i = 1; i < argumentCountIncludingThis; ++i)
- addVarArgChild(get(registerOffset + argumentToOperand(i)));
- setIntrinsicResult(
- usesResult, resultOperand,
- addToGraph(Node::VarArg, NewArray, OpInfo(ArrayWithUndecided), OpInfo(0)));
- return true;
- } else if (function->classInfo() == &StringConstructor::s_info) {
- Node* result;
-
- if (argumentCountIncludingThis <= 1)
- result = cellConstant(m_vm->smallStrings.emptyString());
- else
- result = addToGraph(ToString, get(registerOffset + argumentToOperand(1)));
-
- if (kind == CodeForConstruct)
- result = addToGraph(NewStringObject, OpInfo(function->globalObject()->stringObjectStructure()), result);
-
- setIntrinsicResult(usesResult, resultOperand, result);
- return true;
- }
-
- return false;
- }
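- // Editor's note: concretely, the specializations above compile
- //   new Array(n)       -> NewArrayWithSize(n)
- //   new Array(a, b, c) -> NewArray(a, b, c) with undecided indexing type
- //   String(x)          -> ToString(x)
- //   new String(x)      -> NewStringObject(ToString(x))
- // while zero-argument Array()/String() take the empty vararg and
- // empty-string paths respectively.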
- Node* ByteCodeParser::handleGetByOffset(SpeculatedType prediction, Node* base, unsigned identifierNumber, PropertyOffset offset)
- {
- Node* propertyStorage;
- if (isInlineOffset(offset))
- propertyStorage = base;
- else
- propertyStorage = addToGraph(GetButterfly, base);
- // FIXME: It would be far more efficient for load elimination (and safer from
- // an OSR standpoint) if GetByOffset also referenced the object we were loading
- // from, and if we could load eliminate a GetByOffset even if the butterfly
- // had changed. That would be a great success.
- Node* getByOffset = addToGraph(GetByOffset, OpInfo(m_graph.m_storageAccessData.size()), OpInfo(prediction), propertyStorage);
- StorageAccessData storageAccessData;
- storageAccessData.offset = indexRelativeToBase(offset);
- storageAccessData.identifierNumber = identifierNumber;
- m_graph.m_storageAccessData.append(storageAccessData);
- return getByOffset;
- }
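- // Editor's note: the inline/out-of-line split above mirrors the JSC object
- // layout: low property offsets live inside the cell itself, higher ones in the
- // separately allocated butterfly. The node sequence being built is roughly
- //
- //   storage = isInlineOffset(offset) ? base : GetButterfly(base)
- //   value   = GetByOffset(storage, indexRelativeToBase(offset))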
- void ByteCodeParser::handleGetByOffset(
- int destinationOperand, SpeculatedType prediction, Node* base, unsigned identifierNumber,
- PropertyOffset offset)
- {
- set(destinationOperand, handleGetByOffset(prediction, base, identifierNumber, offset));
- }
- void ByteCodeParser::handleGetById(
- int destinationOperand, SpeculatedType prediction, Node* base, unsigned identifierNumber,
- const GetByIdStatus& getByIdStatus)
- {
- if (!getByIdStatus.isSimple()
- || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
- || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadWeakConstantCache)) {
- set(destinationOperand,
- addToGraph(
- getByIdStatus.makesCalls() ? GetByIdFlush : GetById,
- OpInfo(identifierNumber), OpInfo(prediction), base));
- return;
- }
-
- ASSERT(getByIdStatus.structureSet().size());
-
- // The implementation of GetByOffset does not know to terminate speculative
- // execution if it doesn't have a prediction, so we do it manually.
- if (prediction == SpecNone)
- addToGraph(ForceOSRExit);
- #if ENABLE(DETACHED_JIT)
- #pragma message "[SECURE JSCORE] profiler is disabled"
- #else
- else if (m_graph.m_compilation)
- m_graph.m_compilation->noticeInlinedGetById();
- #endif
-
- Node* originalBaseForBaselineJIT = base;
-
- addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(getByIdStatus.structureSet())), base);
-
- if (!getByIdStatus.chain().isEmpty()) {
- Structure* currentStructure = getByIdStatus.structureSet().singletonStructure();
- JSObject* currentObject = 0;
- for (unsigned i = 0; i < getByIdStatus.chain().size(); ++i) {
- currentObject = asObject(currentStructure->prototypeForLookup(m_inlineStackTop->m_codeBlock));
- currentStructure = getByIdStatus.chain()[i];
- base = addStructureTransitionCheck(currentObject, currentStructure);
- }
- }
-
- // Unless we want bugs like https://bugs.webkit.org/show_bug.cgi?id=88783, we need to
- // ensure that the base of the original get_by_id is kept alive until we're done with
- // all of the speculations. We only insert the Phantom if there had been a CheckStructure
- // on something other than the base following the CheckStructure on base, or if the
- // access was compiled to a WeakJSConstant specific value, in which case we might not
- // have any explicit use of the base at all.
- if (getByIdStatus.specificValue() || originalBaseForBaselineJIT != base)
- addToGraph(Phantom, originalBaseForBaselineJIT);
-
- if (getByIdStatus.specificValue()) {
- ASSERT(getByIdStatus.specificValue().isCell());
-
- set(destinationOperand, cellConstant(getByIdStatus.specificValue().asCell()));
- return;
- }
-
- handleGetByOffset(
- destinationOperand, prediction, base, identifierNumber, getByIdStatus.offset());
- }
- void ByteCodeParser::prepareToParseBlock()
- {
- for (unsigned i = 0; i < m_constants.size(); ++i)
- m_constants[i] = ConstantRecord();
- m_cellConstantNodes.clear();
- }
- Node* ByteCodeParser::getScope(bool skipTop, unsigned skipCount)
- {
- Node* localBase;
- if (inlineCallFrame() && !inlineCallFrame()->isClosureCall()) {
- ASSERT(inlineCallFrame()->callee);
- localBase = cellConstant(inlineCallFrame()->callee->scope());
- } else
- localBase = addToGraph(GetMyScope);
- if (skipTop) {
- ASSERT(!inlineCallFrame());
- localBase = addToGraph(SkipTopScope, localBase);
- }
- for (unsigned n = skipCount; n--;)
- localBase = addToGraph(SkipScope, localBase);
- return localBase;
- }
- bool ByteCodeParser::parseResolveOperations(SpeculatedType prediction, unsigned identifier, ResolveOperations* resolveOperations, PutToBaseOperation* putToBaseOperation, Node** base, Node** value)
- {
- if (resolveOperations->isEmpty()) {
- addToGraph(ForceOSRExit);
- return false;
- }
- JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
- int skipCount = 0;
- bool skipTop = false;
- bool skippedScopes = false;
- bool setBase = false;
- ResolveOperation* pc = resolveOperations->data();
- Node* localBase = 0;
- bool resolvingBase = true;
- while (resolvingBase) {
- switch (pc->m_operation) {
- case ResolveOperation::ReturnGlobalObjectAsBase:
- *base = cellConstant(globalObject);
- ASSERT(!value);
- return true;
- case ResolveOperation::SetBaseToGlobal:
- *base = cellConstant(globalObject);
- setBase = true;
- resolvingBase = false;
- ++pc;
- break;
- case ResolveOperation::SetBaseToUndefined:
- *base = constantUndefined();
- setBase = true;
- resolvingBase = false;
- ++pc;
- break;
- case ResolveOperation::SetBaseToScope:
- localBase = getScope(skipTop, skipCount);
- *base = localBase;
- setBase = true;
- resolvingBase = false;
- // Reset the scope skipping as we've already loaded it
- skippedScopes = false;
- ++pc;
- break;
- case ResolveOperation::ReturnScopeAsBase:
- *base = getScope(skipTop, skipCount);
- ASSERT(!value);
- return true;
- case ResolveOperation::SkipTopScopeNode:
- ASSERT(!inlineCallFrame());
- skipTop = true;
- skippedScopes = true;
- ++pc;
- break;
- case ResolveOperation::SkipScopes:
- skipCount += pc->m_scopesToSkip;
- skippedScopes = true;
- ++pc;
- break;
- case ResolveOperation::CheckForDynamicEntriesBeforeGlobalScope:
- return false;
- case ResolveOperation::Fail:
- return false;
- default:
- resolvingBase = false;
- }
- }
- if (skippedScopes)
- localBase = getScope(skipTop, skipCount);
- if (base && !setBase)
- *base = localBase;
- ASSERT(value);
- ResolveOperation* resolveValueOperation = pc;
- switch (resolveValueOperation->m_operation) {
- case ResolveOperation::GetAndReturnGlobalProperty: {
- ResolveGlobalStatus status = ResolveGlobalStatus::computeFor(m_inlineStackTop->m_profiledBlock, m_currentIndex, resolveValueOperation, m_codeBlock->identifier(identifier));
- if (status.isSimple()) {
- ASSERT(status.structure());
- Node* globalObjectNode = addStructureTransitionCheck(globalObject, status.structure());
- if (status.specificValue()) {
- ASSERT(status.specificValue().isCell());
- *value = cellConstant(status.specificValue().asCell());
- } else
- *value = handleGetByOffset(prediction, globalObjectNode, identifier, status.offset());
- return true;
- }
- Node* resolve = addToGraph(ResolveGlobal, OpInfo(m_graph.m_resolveGlobalData.size()), OpInfo(prediction));
- m_graph.m_resolveGlobalData.append(ResolveGlobalData());
- ResolveGlobalData& data = m_graph.m_resolveGlobalData.last();
- data.identifierNumber = identifier;
- data.resolveOperations = resolveOperations;
- data.putToBaseOperation = putToBaseOperation;
- data.resolvePropertyIndex = resolveValueOperation - resolveOperations->data();
- *value = resolve;
- return true;
- }
- case ResolveOperation::GetAndReturnGlobalVar: {
- *value = addToGraph(
- GetGlobalVar,
- OpInfo(globalObject->assertRegisterIsInThisObject(pc->m_registerAddress)),
- OpInfo(prediction));
- return true;
- }
- case ResolveOperation::GetAndReturnGlobalVarWatchable: {
- SpeculatedType prediction = getPrediction();
- JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
- Identifier ident = m_codeBlock->identifier(identifier);
- SymbolTableEntry entry = globalObject->symbolTable()->get(ident.impl());
- if (!entry.couldBeWatched()) {
- *value = addToGraph(GetGlobalVar, OpInfo(globalObject->assertRegisterIsInThisObject(pc->m_registerAddress)), OpInfo(prediction));
- return true;
- }
- // The watchpoint is still intact! This means that we will get notified if the
- // current value in the global variable changes. So, we can inline that value.
- // Moreover, currently we can assume that this value is a JSFunction*, which
- // implies that it's a cell. This simplifies things, since in general we'd have
- // to use a JSConstant for non-cells and a WeakJSConstant for cells. So instead
- // of having both cases we just assert that the value is a cell.
- // NB. If it wasn't for CSE, GlobalVarWatchpoint would have no need for the
- // register pointer. But CSE tracks effects on global variables by comparing
- // register pointers. Because CSE executes multiple times while the backend
- // executes once, we use the following performance trade-off:
- // - The node refers directly to the register pointer to make CSE super cheap.
- // - To perform backend code generation, the node only contains the identifier
- // number, from which it is possible to get (via a few average-time O(1)
- // lookups) to the WatchpointSet.
- addToGraph(GlobalVarWatchpoint, OpInfo(globalObject->assertRegisterIsInThisObject(pc->m_registerAddress)), OpInfo(identifier));
- JSValue specificValue = globalObject->registerAt(entry.getIndex()).get();
- ASSERT(specificValue.isCell());
- *value = cellConstant(specificValue.asCell());
- return true;
- }
- case ResolveOperation::GetAndReturnScopedVar: {
- Node* getScopeRegisters = addToGraph(GetScopeRegisters, localBase);
- *value = addToGraph(GetScopedVar, OpInfo(resolveValueOperation->m_offset), OpInfo(prediction), getScopeRegisters);
- return true;
- }
- default:
- CRASH();
- return false;
- }
- }
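- // Editor's note: the watchpoint-based folding above, in distilled pseudo-C++
- // (hypothetical names, not the real WatchpointSet API):
- //
- //   if (entry.couldBeWatched()) {
- //       emit(GlobalVarWatchpoint);              // compiled code OSR-exits if the var is written
- //       value = weakConstant(currentCellValue); // safe to fold: any write exits first
- //   } else
- //       value = emit(GetGlobalVar);             // not watchable; load at runtime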
- bool ByteCodeParser::parseBlock(unsigned limit)
- {
- bool shouldContinueParsing = true;
- Interpreter* interpreter = m_vm->interpreter;
- Instruction* instructionsBegin = m_inlineStackTop->m_codeBlock->instructions().begin();
- unsigned blockBegin = m_currentIndex;
-
- // If we are the first basic block, introduce markers for arguments. This allows
- // us to track if a use of an argument may use the actual argument passed, as
- // opposed to using a value we set explicitly.
- if (m_currentBlock == m_graph.m_blocks[0].get() && !inlineCallFrame()) {
- m_graph.m_arguments.resize(m_numArguments);
- for (unsigned argument = 0; argument < m_numArguments; ++argument) {
- VariableAccessData* variable = newVariableAccessData(
- argumentToOperand(argument), m_codeBlock->isCaptured(argumentToOperand(argument)));
- variable->mergeStructureCheckHoistingFailed(
- m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
-
- Node* setArgument = addToGraph(SetArgument, OpInfo(variable));
- m_graph.m_arguments[argument] = setArgument;
- m_currentBlock->variablesAtTail.setArgumentFirstTime(argument, setArgument);
- }
- }
- while (true) {
- m_currentProfilingIndex = m_currentIndex;
- // Don't extend over jump destinations.
- if (m_currentIndex == limit) {
- // Ordinarily we want to plant a jump. But refuse to do this if the block is
- // empty. This is a special case for inlining, which might otherwise create
- // some empty blocks in some cases. When parseBlock() returns with an empty
- // block, it will get repurposed instead of creating a new one. Note that this
- // logic relies on every bytecode resulting in one or more nodes, which would
- // be true anyway except for op_loop_hint, which emits a Phantom to force this
- // to be true.
- if (!m_currentBlock->isEmpty())
- addToGraph(Jump, OpInfo(m_currentIndex));
- else {
- #if DFG_ENABLE(DEBUG_VERBOSE)
- dataLogF("Refusing to plant jump at limit %u because block %p is empty.\n", limit, m_currentBlock);
- #endif
- }
- return shouldContinueParsing;
- }
-
- // Switch on the current bytecode opcode.
- Instruction* currentInstruction = instructionsBegin + m_currentIndex;
- m_currentInstruction = currentInstruction; // Some methods want to use this, and we'd rather not thread it through calls.
- OpcodeID opcodeID = interpreter->getOpcodeID(currentInstruction->u.opcode);
-
- #if ENABLE(DETACHED_JIT)
- #pragma message "[SECURE JSCORE] profiler not supported"
- #else
- if (m_graph.m_compilation && opcodeID != op_call_put_result) {
- addToGraph(CountExecution, OpInfo(m_graph.m_compilation->executionCounterFor(
- Profiler::OriginStack(*m_vm->m_perBytecodeProfiler, m_codeBlock, currentCodeOrigin()))));
- }
- #endif
- switch (opcodeID) {
- // === Function entry opcodes ===
- case op_enter:
- // Initialize all locals to undefined.
- for (int i = 0; i < m_inlineStackTop->m_codeBlock->m_numVars; ++i)
- set(i, constantUndefined(), SetOnEntry);
- NEXT_OPCODE(op_enter);
- case op_convert_this: {
- Node* op1 = getThis();
- if (op1->op() != ConvertThis) {
- ValueProfile* profile =
- m_inlineStackTop->m_profiledBlock->valueProfileForBytecodeOffset(m_currentProfilingIndex);
- profile->computeUpdatedPrediction();
- #if DFG_ENABLE(DEBUG_VERBOSE)
- dataLogF("[bc#%u]: profile %p: ", m_currentProfilingIndex, profile);
- profile->dump(WTF::dataFile());
- dataLogF("\n");
- #endif
- if (profile->m_singletonValueIsTop
- || !profile->m_singletonValue
- || !profile->m_singletonValue.isCell()
- || profile->m_singletonValue.asCell()->classInfo() != &Structure::s_info)
- setThis(addToGraph(ConvertThis, op1));
- else {
- addToGraph(
- CheckStructure,
- OpInfo(m_graph.addStructureSet(jsCast<Structure*>(profile->m_singletonValue.asCell()))),
- op1);
- }
- }
- NEXT_OPCODE(op_convert_this);
- }
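- // Editor's note: the convert_this value profile records the observed
- // *Structure* of |this| rather than |this| itself, which is why the code above
- // compares the singleton's classInfo against Structure::s_info; a stable
- // singleton Structure lets a CheckStructure replace the ConvertThis node.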
- case op_create_this: {
- int calleeOperand = currentInstruction[2].u.operand;
- Node* callee = get(calleeOperand);
- bool alreadyEmitted = false;
- if (callee->op() == WeakJSConstant) {
- JSCell* cell = callee->weakConstant();
- ASSERT(cell->inherits(&JSFunction::s_info));
-
- JSFunction* function = jsCast<JSFunction*>(cell);
- ObjectAllocationProfile* allocationProfile = function->tryGetAllocationProfile();
- if (allocationProfile) {
- addToGraph(AllocationProfileWatchpoint, OpInfo(function));
- // The callee is still live up to this point.
- addToGraph(Phantom, callee);
- set(currentInstruction[1].u.operand,
- addToGraph(NewObject, OpInfo(allocationProfile->structure())));
- alreadyEmitted = true;
- }
- }
- if (!alreadyEmitted)
- set(currentInstruction[1].u.operand,
- addToGraph(CreateThis, OpInfo(currentInstruction[3].u.operand), callee));
- NEXT_OPCODE(op_create_this);
- }
- case op_new_object: {
- set(currentInstruction[1].u.operand,
- addToGraph(NewObject,
- OpInfo(currentInstruction[3].u.objectAllocationProfile->structure())));
- NEXT_OPCODE(op_new_object);
- }
-
- case op_new_array: {
- int startOperand = currentInstruction[2].u.operand;
- int numOperands = currentInstruction[3].u.operand;
- ArrayAllocationProfile* profile = currentInstruction[4].u.arrayAllocationProfile;
- for (int operandIdx = startOperand; operandIdx < startOperand + numOperands; ++operandIdx)
- addVarArgChild(get(operandIdx));
- set(currentInstruction[1].u.operand, addToGraph(Node::VarArg, NewArray, OpInfo(profile->selectIndexingType()), OpInfo(0)));
- NEXT_OPCODE(op_new_array);
- }
-
- case op_new_array_with_size: {
- int lengthOperand = currentInstruction[2].u.operand;
- ArrayAllocationProfile* profile = currentInstruction[3].u.arrayAllocationProfile;
- set(currentInstruction[1].u.operand, addToGraph(NewArrayWithSize, OpInfo(profile->selectIndexingType()), get(lengthOperand)));
- NEXT_OPCODE(op_new_array_with_size);
- }
-
- case op_new_array_buffer: {
- int startConstant = currentInstruction[2].u.operand;
- int numConstants = currentInstruction[3].u.operand;
- ArrayAllocationProfile* profile = currentInstruction[4].u.arrayAllocationProfile;
- NewArrayBufferData data;
- data.startConstant = m_inlineStackTop->m_constantBufferRemap[startConstant];
- data.numConstants = numConstants;
- data.indexingType = profile->selectIndexingType();
- // If this statement has never executed, we'll have the wrong indexing type in the profile.
- for (int i = 0; i < numConstants; ++i) {
- data.indexingType =
- leastUpperBoundOfIndexingTypeAndValue(
- data.indexingType,
- m_codeBlock->constantBuffer(data.startConstant)[i]);
- }
-
- m_graph.m_newArrayBufferData.append(data);
- set(currentInstruction[1].u.operand, addToGraph(NewArrayBuffer, OpInfo(&m_graph.m_newArrayBufferData.last())));
- NEXT_OPCODE(op_new_array_buffer);
- }
-
- case op_new_regexp: {
- set(currentInstruction[1].u.operand, addToGraph(NewRegexp, OpInfo(currentInstruction[2].u.operand)));
- NEXT_OPCODE(op_new_regexp);
- }
-
- case op_get_callee: {
- ValueProfile* profile = currentInstruction[2].u.profile;
- profile->computeUpdatedPrediction();
- if (profile->m_singletonValueIsTop
- || !profile->m_singletonValue
- || !profile->m_singletonValue.isCell())
- set(currentInstruction[1].u.operand, get(JSStack::Callee));
- else {
- ASSERT(profile->m_singletonValue.asCell()->inherits(&JSFunction::s_info));
- Node* actualCallee = get(JSStack::Callee);
- addToGraph(CheckFunction, OpInfo(profile->m_singletonValue.asCell()), actualCallee);
- set(currentInstruction[1].u.operand, addToGraph(WeakJSConstant, OpInfo(profile->m_singletonValue.asCell())));
- }
- NEXT_OPCODE(op_get_callee);
- }
- // === Bitwise operations ===
- case op_bitand: {
- Node* op1 = getToInt32(currentInstruction[2].u.operand);
- Node* op2 = getToInt32(currentInstruction[3].u.operand);
- set(currentInstruction[1].u.operand, addToGraph(BitAnd, op1, op2));
- NEXT_OPCODE(op_bitand);
- }
- case op_bitor: {
- Node* op1 = getToInt32(currentInstruction[2].u.operand);
- Node* op2 = getToInt32(currentInstruction[3].u.operand);
- set(currentInstruction[1].u.operand, addToGraph(BitOr, op1, op2));
- NEXT_OPCODE(op_bitor);
- }
- case op_bitxor: {
- Node* op1 = getToInt32(currentInstruction[2].u.operand);
- Node* op2 = getToInt32(currentInstruction[3].u.operand);
- set(currentInstruction[1].u.operand, addToGraph(BitXor, op1, op2));
- NEXT_OPCODE(op_bitxor);
- }
- case op_rshift: {
- Node* op1 = getToInt32(currentInstruction[2].u.operand);
- Node* op2 = getToInt32(currentInstruction[3].u.operand);
- Node* result;
- // Optimize out shifts by zero.
- if (isInt32Constant(op2) && !(valueOfInt32Constant(op2) & 0x1f))
- result = op1;
- else
- result = addToGraph(BitRShift, op1, op2);
- set(currentInstruction[1].u.operand, result);
- NEXT_OPCODE(op_rshift);
- }
- case op_lshift: {
- Node* op1 = getToInt32(currentInstruction[2].u.operand);
- Node* op2 = getToInt32(currentInstruction[3].u.operand);
- Node* result;
- // Optimize out shifts by zero.
- if (isInt32Constant(op2) && !(valueOfInt32Constant(op2) & 0x1f))
- result = op1;
- else
- result = addToGraph(BitLShift, op1, op2);
- set(currentInstruction[1].u.operand, result);
- NEXT_OPCODE(op_lshift);
- }
- case op_urshift: {
- Node* op1 = getToInt32(currentInstruction[2].u.operand);
- Node* op2 = getToInt32(currentInstruction[3].u.operand);
- Node* result;
- // The result of a zero-extending right shift is treated as an unsigned value.
- // This means that if the top bit is set, the result is not in the int32 range,
- // and as such must be stored as a double. If the shift amount is a constant,
- // we may be able to optimize.
- if (isInt32Constant(op2)) {
- // If we know we are shifting by a non-zero amount, then since the operation
- // zero fills we know the top bit of the result must be zero, and as such the
- // result must be within the int32 range. Conversely, if this is a shift by
- // zero, then the result may be changed by the conversion to unsigned, but it
- // is not necessary to perform the shift!
- if (valueOfInt32Constant(op2) & 0x1f)
- result = addToGraph(BitURShift, op1, op2);
- else
- result = makeSafe(addToGraph(UInt32ToNumber, op1));
- } else {
- // Cannot optimize at this stage; shift & potentially rebox as a double.
- result = addToGraph(BitURShift, op1, op2);
- result = makeSafe(addToGraph(UInt32ToNumber, result));
- }
- set(currentInstruction[1].u.operand, result);
- NEXT_OPCODE(op_urshift);
- }
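- // Editor's note: why the shift-by-zero constant case above still needs
- // UInt32ToNumber: in C++ terms the bytecode computes
- //
- //   static double urshiftLike(int32_t x, int32_t shift)
- //   {
- //       return static_cast<double>(static_cast<uint32_t>(x) >> (shift & 31));
- //   }
- //
- // so urshiftLike(-1, 0) is 4294967295.0: a zero shift leaves the bits alone
- // but still reinterprets them as unsigned, hence the reboxing step.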
- // === Increment/Decrement opcodes ===
- case op_inc: {
- unsigned srcDst = currentInstruction[1].u.operand;
- Node* op = get(srcDst);
- set(srcDst, makeSafe(addToGraph(ArithAdd, op, one())));
- NEXT_OPCODE(op_inc);
- }
- case op_dec: {
- unsigned srcDst = currentInstruction[1].u.operand;
- Node* op = get(srcDst);
- set(srcDst, makeSafe(addToGraph(ArithSub, op, one())));
- NEXT_OPCODE(op_dec);
- }
- // === Arithmetic operations ===
- case op_add: {
- Node* op1 = get(currentInstruction[2].u.operand);
- Node* op2 = get(currentInstruction[3].u.operand);
- if (op1->hasNumberResult() && op2->hasNumberResult())
- set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithAdd, op1, op2)));
- else
- set(currentInstruction[1].u.operand, makeSafe(addToGraph(ValueAdd, op1, op2)));
- NEXT_OPCODE(op_add);
- }
- case op_sub: {
- Node* op1 = get(currentInstruction[2].u.operand);
- Node* op2 = get(currentInstruction[3].u.operand);
- set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithSub, op1, op2)));
- NEXT_OPCODE(op_sub);
- }
- case op_negate: {
- Node* op1 = get(currentInstruction[2].u.operand);
- set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithNegate, op1)));
- NEXT_OPCODE(op_negate);
- }
- case op_mul: {
- // Multiply requires that the inputs are not truncated, unfortunately.
- Node* op1 = get(currentInstruction[2].u.operand);
- Node* op2 = get(currentInstruction[3].u.operand);
- set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithMul, op1, op2)));
- NEXT_OPCODE(op_mul);
- }
- case op_mod: {
- Node* op1 = get(currentInstruction[2].u.operand);
- Node* op2 = get(currentInstruction[3].u.operand);
- set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithMod, op1, op2)));
- NEXT_OPCODE(op_mod);
- }
- case op_div: {
- Node* op1 = get(currentInstruction[2].u.operand);
- Node* op2 = get(currentInstruction[3].u.operand);
- set(currentInstruction[1].u.operand, makeDivSafe(addToGraph(ArithDiv, op1, op2)));
- NEXT_OPCODE(op_div);
- }
- // === Misc operations ===
- #if ENABLE(DEBUG_WITH_BREAKPOINT)
- case op_debug:
- addToGraph(Breakpoint);
- NEXT_OPCODE(op_debug);
- #endif
- case op_mov: {
- Node* op = get(currentInstruction[2].u.operand);
- set(currentInstruction[1].u.operand, op);
- NEXT_OPCODE(op_mov);
- }
- case op_check_has_instance:
- addToGraph(CheckHasInstance, get(currentInstruction[3].u.operand));
- NEXT_OPCODE(op_check_has_instance);
- case op_instanceof: {
- Node* value = get(currentInstruction[2].u.operand);
- Node* prototype = get(currentInstruction[3].u.operand);
- set(currentInstruction[1].u.operand, addToGraph(InstanceOf, value, prototype));
- NEXT_OPCODE(op_instanceof);
- }
-
- case op_is_undefined: {
- Node* value = get(currentInstruction[2].u.operand);
- set(currentInstruction[1].u.operand, addToGraph(IsUndefined, value));
- NEXT_OPCODE(op_is_undefined);
- }
- case op_is_boolean: {
- Node* value = get(currentInstruction[2].u.operand);
- set(currentInstruction[1].u.operand, addToGraph(IsBoolean, value));
- NEXT_OPCODE(op_is_boolean);
- }
- case op_is_number: {
- Node* value = get(currentInstruction[2].u.operand);
- set(currentInstruction[1].u.operand, addToGraph(IsNumber, value));
- NEXT_OPCODE(op_is_number);
- }
- case op_is_string: {
- Node* value = get(currentInstruction[2].u.operand);
- set(currentInstruction[1].u.operand, addToGraph(IsString, value));
- NEXT_OPCODE(op_is_string);
- }
- case op_is_object: {
- Node* value = get(currentInstruction[2].u.operand);
- set(currentInstruction[1].u.operand, addToGraph(IsObject, value));
- NEXT_OPCODE(op_is_object);
- }
- case op_is_function: {
- Node* value = get(currentInstruction[2].u.operand);
- set(currentInstruction[1].u.operand, addToGraph(IsFunction, value));
- NEXT_OPCODE(op_is_function);
- }
- case op_not: {
- Node* value = get(currentInstruction[2].u.operand);
- set(currentInstruction[1].u.operand, addToGraph(LogicalNot, value));
- NEXT_OPCODE(op_not);
- }
-
- case op_to_primitive: {
- Node* value = get(currentInstruction[2].u.operand);
- set(currentInstruction[1].u.operand, addToGraph(ToPrimitive, value));
- NEXT_OPCODE(op_to_primitive);
- }
-
- case op_strcat: {
- int startOperand = currentInstruction[2].u.operand;
- int numOperands = currentInstruction[3].u.operand;
- #if CPU(X86)
- // X86 doesn't have enough registers to compile MakeRope with three arguments.
- // Rather than try to be clever, we just make MakeRope dumber on this processor.
- const unsigned maxRopeArguments = 2;
- #else
- const unsigned maxRopeArguments = 3;
- #endif
- OwnArrayPtr<Node*> toStringNodes = adoptArrayPtr(new Node*[numOperands]);
- for (int i = 0; i < numOperands; i++)
- toStringNodes[i] = addToGraph(ToString, get(startOperand + i));
- for (int i = 0; i < numOperands; i++)
- addToGraph(Phantom, toStringNodes[i]);
- Node* operands[AdjacencyList::Size];
- unsigned indexInOperands = 0;
- for (unsigned i = 0; i < AdjacencyList::Size; ++i)
- operands[i] = 0;
- for (int operandIdx = 0; operandIdx < numOperands; ++operandIdx) {
- if (indexInOperands == maxRopeArguments) {
- operands[0] = addToGraph(MakeRope, operands[0], operands[1], operands[2]);
- for (unsigned i = 1; i < AdjacencyList::Size; ++i)
- operands[i] = 0;
- indexInOperands = 1;
- }
-
- ASSERT(indexInOperands < AdjacencyList::Size);
- ASSERT(indexInOperands < maxRopeArguments);
- operands[indexInOperands++] = toStringNodes[operandIdx];
- }
- set(currentInstruction[1].u.operand,
- addToGraph(MakeRope, operands[0], operands[1], operands[2]));
- NEXT_OPCODE(op_strcat);
- }
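- // Editor's note (worked example): with maxRopeArguments == 3, five operands
- // s0..s4 fold as MakeRope(MakeRope(s0, s1, s2), s3, s4): whenever the operand
- // buffer fills, it collapses into a rope that becomes operand 0 of the next
- // round.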
- case op_less: {
- Node* op1 = get(currentInstruction[2].u.operand);
- Node* op2 = get(currentInstruction[3].u.operand);
- if (canFold(op1) && canFold(op2)) {
- JSValue a = valueOfJSConstant(op1);
- JSValue b = valueOfJSConstant(op2);
- if (a.isNumber() && b.isNumber()) {
- set(currentInstruction[1].u.operand,
- getJSConstantForValue(jsBoolean(a.asNumber() < b.asNumber())));
- NEXT_OPCODE(op_less);
- }
- }
- set(currentInstruction[1].u.operand, addToGraph(CompareLess, op1, op2));
- NEXT_OPCODE(op_less);
- }
- case op_lesseq: {
- Node* op1 = get(currentInstruction[2].u.operand);
- Node* op2 = get(currentInstruction[3].u.operand);
- if (canFold(op1) && canFold(op2)) {
- JSValue a = valueOfJSConstant(op1);
- JSValue b = valueOfJSConstant(op2);
- if (a.isNumber() && b.isNumber()) {
- set(currentInstruction[1].u.operand,
- getJSConstantForValue(jsBoolean(a.asNumber() <= b.asNumber())));
- NEXT_OPCODE(op_lesseq);
- }
- }
- set(currentInstruction[1].u.operand, addToGraph(CompareLessEq, op1, op2));
- NEXT_OPCODE(op_lesseq);
- }
- case op_greater: {
- Node* op1 = get(currentInstruction[2].u.operand);
- Node* op2 = get(currentInstruction[3].u.operand);
- if (canFold(op1) && canFold(op2)) {
- JSValue a = valueOfJSConstant(op1);
- JSValue b = valueOfJSConstant(op2);
- if (a.isNumber() && b.isNumber()) {
- set(currentInstruction[1].u.operand,
- getJSConstantForValue(jsBoolean(a.asNumber() > b.asNumber())));
- NEXT_OPCODE(op_greater);
- }
- }
- set(currentInstruction[1].u.operand, addToGraph(CompareGreater, op1, op2));
- NEXT_OPCODE(op_greater);
- }
- case op_greatereq: {
- Node* op1 = get(currentInstruction[2].u.operand);
- Node* op2 = get(currentInstruction[3].u.operand);
- if (canFold(op1) && canFold(op2)) {
- JSValue a = valueOfJSConstant(op1);
- JSValue b = valueOfJSConstant(op2);
- if (a.isNumber() && b.isNumber()) {
- set(currentInstruction[1].u.operand,
- getJSConstantForValue(jsBoolean(a.asNumber() >= b.asNumber())));
- NEXT_OPCODE(op_greatereq);
- }
- }
- set(currentInstruction[1].u.operand, addToGraph(CompareGreaterEq, op1, op2));
- NEXT_OPCODE(op_greatereq);
- }
- case op_eq: {
- Node* op1 = get(currentInstruction[2].u.operand);
- Node* op2 = get(currentInstruction[3].u.operand);
- if (canFold(op1) && canFold(op2)) {
- JSValue a = valueOfJSConstant(op1);
- JSValue b = valueOfJSConstant(op2);
- set(currentInstruction[1].u.operand,
- getJSConstantForValue(jsBoolean(JSValue::equal(m_codeBlock->globalObject()->globalExec(), a, b))));
- NEXT_OPCODE(op_eq);
- }
- set(currentInstruction[1].u.operand, addToGraph(CompareEq, op1, op2));
- NEXT_OPCODE(op_eq);
- }
- case op_eq_null: {
- Node* value = get(currentInstruction[2].u.operand);
- set(currentInstruction[1].u.operand, addToGraph(CompareEqConstant, value, constantNull()));
- NEXT_OPCODE(op_eq_null);
- }
- case op_stricteq: {
- Node* op1 = get(currentInstruction[2].u.operand);
- Node* op2 = get(currentInstruction[3].u.operand);
- if (canFold(op1) && canFold(op2)) {
- JSValue a = valueOfJSConstant(op1);
- JSValue b = valueOfJSConstant(op2);
- set(currentInstruction[1].u.operand,
- getJSConstantForValue(jsBoolean(JSValue::strictEqual(m_codeBlock->globalObject()->globalExec(), a, b))));
- NEXT_OPCODE(op_stricteq);
- }
- if (isConstantForCompareStrictEq(op1))
- set(currentInstruction[1].u.operand, addToGraph(CompareStrictEqConstant, op2, op1));
- else if (isConstantForCompareStrictEq(op2))
- set(currentInstruction[1].u.operand, addToGraph(CompareStrictEqConstant, op1, op2));
- else
- set(currentInstruction[1].u.operand, addToGraph(CompareStrictEq, op1, op2));
- NEXT_OPCODE(op_stricteq);
- }
- case op_neq: {
- Node* op1 = get(currentInstruction[2].u.operand);
- Node* op2 = get(currentInstruction[3].u.operand);
- if (canFold(op1) && canFold(op2)) {
- JSValue a = valueOfJSConstant(op1);
- JSValue b = valueOfJSConstant(op2);
- set(currentInstruction[1].u.operand,
- getJSConstantForValue(jsBoolean(!JSValue::equal(m_codeBlock->globalObject()->globalExec(), a, b))));
- NEXT_OPCODE(op_neq);
- }
- set(currentInstruction[1].u.operand, addToGraph(LogicalNot, addToGraph(CompareEq, op1, op2)));
- NEXT_OPCODE(op_neq);
- }
- case op_neq_null: {
- Node* value = get(currentInstruction[2].u.operand);
- set(currentInstruction[1].u.operand, addToGraph(LogicalNot, addToGraph(CompareEqConstant, value, constantNull())));
- NEXT_OPCODE(op_neq_null);
- }
- case op_nstricteq: {
- Node* op1 = get(currentInstruction[2].u.operand);
- Node* op2 = get(currentInstruction[3].u.operand);
- if (canFold(op1) && canFold(op2)) {
- JSValue a = valueOfJSConstant(op1);
- JSValue b = valueOfJSConstant(op2);
- set(currentInstruction[1].u.operand,
- getJSConstantForValue(jsBoolean(!JSValue::strictEqual(m_codeBlock->globalObject()->globalExec(), a, b))));
- NEXT_OPCODE(op_nstricteq);
- }
- Node* invertedResult;
- if (isConstantForCompareStrictEq(op1))
- invertedResult = addToGraph(CompareStrictEqConstant, op2, op1);
- else if (isConstantForCompareStrictEq(op2))
- invertedResult = addToGraph(CompareStrictEqConstant, op1, op2);
- else
- invertedResult = addToGraph(CompareStrictEq, op1, op2);
- set(currentInstruction[1].u.operand, addToGraph(LogicalNot, invertedResult));
- NEXT_OPCODE(op_nstricteq);
- }
- // === Property access operations ===
- case op_get_by_val: {
- SpeculatedType prediction = getPrediction();
-
- Node* base = get(currentInstruction[2].u.operand);
- ArrayMode arrayMode = getArrayModeAndEmitChecks(currentInstruction[4].u.arrayProfile, Array::Read, base);
- Node* property = get(currentInstruction[3].u.operand);
- Node* getByVal = addToGraph(GetByVal, OpInfo(arrayMode.asWord()), OpInfo(prediction), base, property);
- set(currentInstruction[1].u.operand, getByVal);
- NEXT_OPCODE(op_get_by_val);
- }
- case op_put_by_val: {
- Node* base = get(currentInstruction[1].u.operand);
- ArrayMode arrayMode = getArrayModeAndEmitChecks(currentInstruction[4].u.arrayProfile, Array::Write, base);
-
- Node* property = get(currentInstruction[2].u.operand);
- Node* value = get(currentInstruction[3].u.operand);
-
- addVarArgChild(base);
- addVarArgChild(property);
- addVarArgChild(value);
- addVarArgChild(0); // Leave room for property storage.
- addToGraph(Node::VarArg, PutByVal, OpInfo(arrayMode.asWord()), OpInfo(0));
- NEXT_OPCODE(op_put_by_val);
- }
-
- case op_get_by_id:
- case op_get_by_id_out_of_line:
- case op_get_array_length: {
- SpeculatedType prediction = getPrediction();
-
- Node* base = get(currentInstruction[2].u.operand);
- unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
-
- Identifier identifier = m_codeBlock->identifier(identifierNumber);
- GetByIdStatus getByIdStatus = GetByIdStatus::computeFor(
- m_inlineStackTop->m_profiledBlock, m_currentIndex, identifier);
-
- handleGetById(
- currentInstruction[1].u.operand, prediction, base, identifierNumber, getByIdStatus);
- NEXT_OPCODE(op_get_by_id);
- }
- case op_put_by_id:
- case op_put_by_id_out_of_line:
- case op_put_by_id_transition_direct:
- case op_put_by_id_transition_normal:
- case op_put_by_id_transition_direct_out_of_line:
- case op_put_by_id_transition_normal_out_of_line: {
- Node* value = get(currentInstruction[3].u.operand);
- Node* base = get(currentInstruction[1].u.operand);
- unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
- bool direct = currentInstruction[8].u.operand;
- PutByIdStatus putByIdStatus = PutByIdStatus::computeFor(
- m_inlineStackTop->m_profiledBlock,
- m_currentIndex,
- m_codeBlock->identifier(identifierNumber));
- bool canCountAsInlined = true;
- if (!putByIdStatus.isSet()) {
- addToGraph(ForceOSRExit);
- canCountAsInlined = false;
- }
-
- bool hasExitSite =
- m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
- || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadWeakConstantCache);
-
- if (!hasExitSite && putByIdStatus.isSimpleReplace()) {
- addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(putByIdStatus.oldStructure())), base);
- Node* propertyStorage;
- if (isInlineOffset(putByIdStatus.offset()))
- propertyStorage = base;
- else
- propertyStorage = addToGraph(GetButterfly, base);
- addToGraph(PutByOffset, OpInfo(m_graph.m_storageAccessData.size()), propertyStorage, base, value);
-
- StorageAccessData storageAccessData;
- storageAccessData.offset = indexRelativeToBase(putByIdStatus.offset());
- storageAccessData.identifierNumber = identifierNumber;
- m_graph.m_storageAccessData.append(storageAccessData);
- } else if (!hasExitSite
- && putByIdStatus.isSimpleTransition()
- && structureChainIsStillValid(
- direct,
- putByIdStatus.oldStructure(),
- putByIdStatus.structureChain())) {
- addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(putByIdStatus.oldStructure())), base);
- if (!direct) {
- if (!putByIdStatus.oldStructure()->storedPrototype().isNull()) {
- addStructureTransitionCheck(
- putByIdStatus.oldStructure()->storedPrototype().asCell());
- }
-
- for (WriteBarrier<Structure>* it = putByIdStatus.structureChain()->head(); *it; ++it) {
- JSValue prototype = (*it)->storedPrototype();
- if (prototype.isNull())
- continue;
- ASSERT(prototype.isCell());
- addStructureTransitionCheck(prototype.asCell());
- }
- }
- ASSERT(putByIdStatus.oldStructure()->transitionWatchpointSetHasBeenInvalidated());
-
- Node* propertyStorage;
- StructureTransitionData* transitionData =
- m_graph.addStructureTransitionData(
- StructureTransitionData(
- putByIdStatus.oldStructure(),
- putByIdStatus.newStructure()));
- if (putByIdStatus.oldStructure()->outOfLineCapacity()
- != putByIdStatus.newStructure()->outOfLineCapacity()) {
-
- // If we're growing the property storage then it must be because we're
- // storing into the out-of-line storage.
- ASSERT(!isInlineOffset(putByIdStatus.offset()));
-
- if (!putByIdStatus.oldStructure()->outOfLineCapacity()) {
- propertyStorage = addToGraph(
- AllocatePropertyStorage, OpInfo(transitionData), base);
- } else {
- propertyStorage = addToGraph(
- ReallocatePropertyStorage, OpInfo(transitionData),
- base, addToGraph(GetButterfly, base));
- }
- } else {
- if (isInlineOffset(putByIdStatus.offset()))
- propertyStorage = base;
- else
- propertyStorage = addToGraph(GetButterfly, base);
- }
-
- addToGraph(PutStructure, OpInfo(transitionData), base);
-
- addToGraph(
- PutByOffset,
- OpInfo(m_graph.m_storageAccessData.size()),
- propertyStorage,
- base,
- value);
-
- StorageAccessData storageAccessData;
- storageAccessData.offset = indexRelativeToBase(putByIdStatus.offset());
- storageAccessData.identifierNumber = identifierNumber;
- m_graph.m_storageAccessData.append(storageAccessData);
- } else {
- if (direct)
- addToGraph(PutByIdDirect, OpInfo(identifierNumber), base, value);
- else
- addToGraph(PutById, OpInfo(identifierNumber), base, value);
- canCountAsInlined = false;
- }
- #if ENABLE(DETACHED_JIT)
- #pragma message "[SECURE JSCORE] profiling disabled"
- #else
- if (canCountAsInlined && m_graph.m_compilation)
- m_graph.m_compilation->noticeInlinedPutById();
- #endif
- NEXT_OPCODE(op_put_by_id);
- }
- case op_init_global_const_nop: {
- NEXT_OPCODE(op_init_global_const_nop);
- }
- case op_init_global_const: {
- Node* value = get(currentInstruction[2].u.operand);
- addToGraph(
- PutGlobalVar,
- OpInfo(m_inlineStackTop->m_codeBlock->globalObject()->assertRegisterIsInThisObject(currentInstruction[1].u.registerPointer)),
- value);
- NEXT_OPCODE(op_init_global_const);
- }
- case op_init_global_const_check: {
- Node* value = get(currentInstruction[2].u.operand);
- CodeBlock* codeBlock = m_inlineStackTop->m_codeBlock;
- JSGlobalObject* globalObject = codeBlock->globalObject();
- unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[4].u.operand];
- Identifier identifier = m_codeBlock->identifier(identifierNumber);
- SymbolTableEntry entry = globalObject->symbolTable()->get(identifier.impl());
- if (!entry.couldBeWatched()) {
- addToGraph(
- PutGlobalVar,
- OpInfo(globalObject->assertRegisterIsInThisObject(currentInstruction[1].u.registerPointer)),
- value);
- NEXT_OPCODE(op_init_global_const_check);
- }
- addToGraph(
- PutGlobalVarCheck,
- OpInfo(codeBlock->globalObject()->assertRegisterIsInThisObject(currentInstruction[1].u.registerPointer)),
- OpInfo(identifierNumber),
- value);
- NEXT_OPCODE(op_init_global_const_check);
- }
- // === Block terminators. ===
- case op_jmp: {
- unsigned relativeOffset = currentInstruction[1].u.operand;
- addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
- LAST_OPCODE(op_jmp);
- }
- case op_jtrue: {
- unsigned relativeOffset = currentInstruction[2].u.operand;
- Node* condition = get(currentInstruction[1].u.operand);
- if (canFold(condition)) {
- TriState state = valueOfJSConstant(condition).pureToBoolean();
- if (state == TrueTriState) {
- addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
- LAST_OPCODE(op_jtrue);
- } else if (state == FalseTriState) {
- // Emit a placeholder for this bytecode operation but otherwise
- // just fall through.
- addToGraph(Phantom);
- NEXT_OPCODE(op_jtrue);
- }
- }
- addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jtrue)), condition);
- LAST_OPCODE(op_jtrue);
- }
- case op_jfalse: {
- unsigned relativeOffset = currentInstruction[2].u.operand;
- Node* condition = get(currentInstruction[1].u.operand);
- if (canFold(condition)) {
- TriState state = valueOfJSConstant(condition).pureToBoolean();
- if (state == FalseTriState) {
- addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
- LAST_OPCODE(op_jfalse);
- } else if (state == TrueTriState) {
- // Emit a placeholder for this bytecode operation but otherwise
- // just fall through.
- addToGraph(Phantom);
- NEXT_OPCODE(op_jfalse);
- }
- }
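- // Note that the taken/not-taken targets are swapped relative to op_jtrue:
- // for op_jfalse the jump target is reached when the condition is false.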
- addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jfalse)), OpInfo(m_currentIndex + relativeOffset), condition);
- LAST_OPCODE(op_jfalse);
- }
- case op_jeq_null: {
- unsigned relativeOffset = currentInstruction[2].u.operand;
- Node* value = get(currentInstruction[1].u.operand);
- Node* condition = addToGraph(CompareEqConstant, value, constantNull());
- addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jeq_null)), condition);
- LAST_OPCODE(op_jeq_null);
- }
- case op_jneq_null: {
- unsigned relativeOffset = currentInstruction[2].u.operand;
- Node* value = get(currentInstruction[1].u.operand);
- Node* condition = addToGraph(CompareEqConstant, value, constantNull());
- addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jneq_null)), OpInfo(m_currentIndex + relativeOffset), condition);
- LAST_OPCODE(op_jneq_null);
- }
- case op_jless: {
- unsigned relativeOffset = currentInstruction[3].u.operand;
- Node* op1 = get(currentInstruction[1].u.operand);
- Node* op2 = get(currentInstruction[2].u.operand);
- if (canFold(op1) && canFold(op2)) {
- JSValue aValue = valueOfJSConstant(op1);
- JSValue bValue = valueOfJSConstant(op2);
- if (aValue.isNumber() && bValue.isNumber()) {
- double a = aValue.asNumber();
- double b = bValue.asNumber();
- if (a < b) {
- addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
- LAST_OPCODE(op_jless);
- } else {
- // Emit a placeholder for this bytecode operation but otherwise
- // just fall through.
- addToGraph(Phantom);
- NEXT_OPCODE(op_jless);
- }
- }
- }
- Node* condition = addToGraph(CompareLess, op1, op2);
- addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jless)), condition);
- LAST_OPCODE(op_jless);
- }
- case op_jlesseq: {
- unsigned relativeOffset = currentInstruction[3].u.operand;
- Node* op1 = get(currentInstruction[1].u.operand);
- Node* op2 = get(currentInstruction[2].u.operand);
- if (canFold(op1) && canFold(op2)) {
- JSValue aValue = valueOfJSConstant(op1);
- JSValue bValue = valueOfJSConstant(op2);
- if (aValue.isNumber() && bValue.isNumber()) {
- double a = aValue.asNumber();
- double b = bValue.asNumber();
- if (a <= b) {
- addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
- LAST_OPCODE(op_jlesseq);
- } else {
- // Emit a placeholder for this bytecode operation but otherwise
- // just fall through.
- addToGraph(Phantom);
- NEXT_OPCODE(op_jlesseq);
- }
- }
- }
- Node* condition = addToGraph(CompareLessEq, op1, op2);
- addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jlesseq)), condition);
- LAST_OPCODE(op_jlesseq);
- }
- case op_jgreater: {
- unsigned relativeOffset = currentInstruction[3].u.operand;
- Node* op1 = get(currentInstruction[1].u.operand);
- Node* op2 = get(currentInstruction[2].u.operand);
- if (canFold(op1) && canFold(op2)) {
- JSValue aValue = valueOfJSConstant(op1);
- JSValue bValue = valueOfJSConstant(op2);
- if (aValue.isNumber() && bValue.isNumber()) {
- double a = aValue.asNumber();
- double b = bValue.asNumber();
- if (a > b) {
- addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
- LAST_OPCODE(op_jgreater);
- } else {
- // Emit a placeholder for this bytecode operation but otherwise
- // just fall through.
- addToGraph(Phantom);
- NEXT_OPCODE(op_jgreater);
- }
- }
- }
- Node* condition = addToGraph(CompareGreater, op1, op2);
- addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jgreater)), condition);
- LAST_OPCODE(op_jgreater);
- }
- case op_jgreatereq: {
- unsigned relativeOffset = currentInstruction[3].u.operand;
- Node* op1 = get(currentInstruction[1].u.operand);
- Node* op2 = get(currentInstruction[2].u.operand);
- if (canFold(op1) && canFold(op2)) {
- JSValue aValue = valueOfJSConstant(op1);
- JSValue bValue = valueOfJSConstant(op2);
- if (aValue.isNumber() && bValue.isNumber()) {
- double a = aValue.asNumber();
- double b = bValue.asNumber();
- if (a >= b) {
- addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
- LAST_OPCODE(op_jgreatereq);
- } else {
- // Emit a placeholder for this bytecode operation but otherwise
- // just fall through.
- addToGraph(Phantom);
- NEXT_OPCODE(op_jgreatereq);
- }
- }
- }
- Node* condition = addToGraph(CompareGreaterEq, op1, op2);
- addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jgreatereq)), condition);
- LAST_OPCODE(op_jgreatereq);
- }
- case op_jnless: {
- unsigned relativeOffset = currentInstruction[3].u.operand;
- Node* op1 = get(currentInstruction[1].u.operand);
- Node* op2 = get(currentInstruction[2].u.operand);
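- // The jn* opcodes branch when the comparison is false, so both the folded
- // cases and the Branch operands below are mirrored relative to op_jless.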
- if (canFold(op1) && canFold(op2)) {
- JSValue aValue = valueOfJSConstant(op1);
- JSValue bValue = valueOfJSConstant(op2);
- if (aValue.isNumber() && bValue.isNumber()) {
- double a = aValue.asNumber();
- double b = bValue.asNumber();
- if (a < b) {
- // Emit a placeholder for this bytecode operation but otherwise
- // just fall through.
- addToGraph(Phantom);
- NEXT_OPCODE(op_jnless);
- } else {
- addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
- LAST_OPCODE(op_jnless);
- }
- }
- }
- Node* condition = addToGraph(CompareLess, op1, op2);
- addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jnless)), OpInfo(m_currentIndex + relativeOffset), condition);
- LAST_OPCODE(op_jnless);
- }
- case op_jnlesseq: {
- unsigned relativeOffset = currentInstruction[3].u.operand;
- Node* op1 = get(currentInstruction[1].u.operand);
- Node* op2 = get(currentInstruction[2].u.operand);
- if (canFold(op1) && canFold(op2)) {
- JSValue aValue = valueOfJSConstant(op1);
- JSValue bValue = valueOfJSConstant(op2);
- if (aValue.isNumber() && bValue.isNumber()) {
- double a = aValue.asNumber();
- double b = bValue.asNumber();
- if (a <= b) {
- // Emit a placeholder for this bytecode operation but otherwise
- // just fall through.
- addToGraph(Phantom);
- NEXT_OPCODE(op_jnlesseq);
- } else {
- addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
- LAST_OPCODE(op_jnlesseq);
- }
- }
- }
- Node* condition = addToGraph(CompareLessEq, op1, op2);
- addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jnlesseq)), OpInfo(m_currentIndex + relativeOffset), condition);
- LAST_OPCODE(op_jnlesseq);
- }
- case op_jngreater: {
- unsigned relativeOffset = currentInstruction[3].u.operand;
- Node* op1 = get(currentInstruction[1].u.operand);
- Node* op2 = get(currentInstruction[2].u.operand);
- if (canFold(op1) && canFold(op2)) {
- JSValue aValue = valueOfJSConstant(op1);
- JSValue bValue = valueOfJSConstant(op2);
- if (aValue.isNumber() && bValue.isNumber()) {
- double a = aValue.asNumber();
- double b = bValue.asNumber();
- if (a > b) {
- // Emit a placeholder for this bytecode operation but otherwise
- // just fall through.
- addToGraph(Phantom);
- NEXT_OPCODE(op_jngreater);
- } else {
- addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
- LAST_OPCODE(op_jngreater);
- }
- }
- }
- Node* condition = addToGraph(CompareGreater, op1, op2);
- addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jngreater)), OpInfo(m_currentIndex + relativeOffset), condition);
- LAST_OPCODE(op_jngreater);
- }
- case op_jngreatereq: {
- unsigned relativeOffset = currentInstruction[3].u.operand;
- Node* op1 = get(currentInstruction[1].u.operand);
- Node* op2 = get(currentInstruction[2].u.operand);
- if (canFold(op1) && canFold(op2)) {
- JSValue aValue = valueOfJSConstant(op1);
- JSValue bValue = valueOfJSConstant(op2);
- if (aValue.isNumber() && bValue.isNumber()) {
- double a = aValue.asNumber();
- double b = bValue.asNumber();
- if (a >= b) {
- // Emit a placeholder for this bytecode operation but otherwise
- // just fall through.
- addToGraph(Phantom);
- NEXT_OPCODE(op_jngreatereq);
- } else {
- addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
- LAST_OPCODE(op_jngreatereq);
- }
- }
- }
- Node* condition = addToGraph(CompareGreaterEq, op1, op2);
- addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jngreatereq)), OpInfo(m_currentIndex + relativeOffset), condition);
- LAST_OPCODE(op_jngreatereq);
- }
- case op_ret:
- flushArgumentsAndCapturedVariables();
- if (inlineCallFrame()) {
- if (m_inlineStackTop->m_returnValue != InvalidVirtualRegister)
- setDirect(m_inlineStackTop->m_returnValue, get(currentInstruction[1].u.operand));
- m_inlineStackTop->m_didReturn = true;
- if (m_inlineStackTop->m_unlinkedBlocks.isEmpty()) {
- // If we're returning from the first block, then we're done parsing.
- ASSERT(m_inlineStackTop->m_callsiteBlockHead == m_graph.m_blocks.size() - 1);
- shouldContinueParsing = false;
- LAST_OPCODE(op_ret);
- } else {
- // If inlining created blocks, and we're doing a return, then we need some
- // special linking.
- ASSERT(m_inlineStackTop->m_unlinkedBlocks.last().m_blockIndex == m_graph.m_blocks.size() - 1);
- m_inlineStackTop->m_unlinkedBlocks.last().m_needsNormalLinking = false;
- }
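- // A return that does not fall at the very end of the inlined code block is
- // an early return; plant a jump now and link it to the continuation later.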
- if (m_currentIndex + OPCODE_LENGTH(op_ret) != m_inlineStackTop->m_codeBlock->instructions().size() || m_inlineStackTop->m_didEarlyReturn) {
- ASSERT(m_currentIndex + OPCODE_LENGTH(op_ret) <= m_inlineStackTop->m_codeBlock->instructions().size());
- addToGraph(Jump, OpInfo(NoBlock));
- m_inlineStackTop->m_unlinkedBlocks.last().m_needsEarlyReturnLinking = true;
- m_inlineStackTop->m_didEarlyReturn = true;
- }
- LAST_OPCODE(op_ret);
- }
- addToGraph(Return, get(currentInstruction[1].u.operand));
- LAST_OPCODE(op_ret);
-
- case op_end:
- flushArgumentsAndCapturedVariables();
- ASSERT(!inlineCallFrame());
- addToGraph(Return, get(currentInstruction[1].u.operand));
- LAST_OPCODE(op_end);
- case op_throw:
- flushAllArgumentsAndCapturedVariablesInInlineStack();
- addToGraph(Throw, get(currentInstruction[1].u.operand));
- LAST_OPCODE(op_throw);
-
- case op_throw_static_error:
- flushAllArgumentsAndCapturedVariablesInInlineStack();
- addToGraph(ThrowReferenceError);
- LAST_OPCODE(op_throw_static_error);
-
- case op_call:
- handleCall(interpreter, currentInstruction, Call, CodeForCall);
- NEXT_OPCODE(op_call);
-
- case op_construct:
- handleCall(interpreter, currentInstruction, Construct, CodeForConstruct);
- NEXT_OPCODE(op_construct);
-
- case op_call_varargs: {
- ASSERT(inlineCallFrame());
- ASSERT(currentInstruction[3].u.operand == m_inlineStackTop->m_codeBlock->argumentsRegister());
- ASSERT(!m_inlineStackTop->m_codeBlock->symbolTable()->slowArguments());
- // It would be cool to funnel this into handleCall() so that it can handle
- // inlining. But currently that won't be profitable anyway, since none of the
- // uses of call_varargs will be inlineable. So we set this up manually and
- // without inline/intrinsic detection.
-
- Instruction* putInstruction = currentInstruction + OPCODE_LENGTH(op_call_varargs);
-
- SpeculatedType prediction = SpecNone;
- if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result) {
- m_currentProfilingIndex = m_currentIndex + OPCODE_LENGTH(op_call_varargs);
- prediction = getPrediction();
- }
-
- addToGraph(CheckArgumentsNotCreated);
-
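- // Make sure the machine frame reserves room for the callee frame header plus
- // every argument we may forward.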
- unsigned argCount = inlineCallFrame()->arguments.size();
- if (JSStack::CallFrameHeaderSize + argCount > m_parameterSlots)
- m_parameterSlots = JSStack::CallFrameHeaderSize + argCount;
-
- addVarArgChild(get(currentInstruction[1].u.operand)); // callee
- addVarArgChild(get(currentInstruction[2].u.operand)); // this
- for (unsigned argument = 1; argument < argCount; ++argument)
- addVarArgChild(get(argumentToOperand(argument)));
-
- Node* call = addToGraph(Node::VarArg, Call, OpInfo(0), OpInfo(prediction));
- if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result)
- set(putInstruction[1].u.operand, call);
-
- NEXT_OPCODE(op_call_varargs);
- }
-
- case op_call_put_result:
- NEXT_OPCODE(op_call_put_result);
-
- case op_jneq_ptr:
- // Statically speculate for now. It makes sense to let speculate-only jneq_ptr
- // support simmer for a while before making it more general, since it's
- // already gnarly enough as it is.
- ASSERT(pointerIsFunction(currentInstruction[2].u.specialPointer));
- addToGraph(
- CheckFunction,
- OpInfo(actualPointerFor(m_inlineStackTop->m_codeBlock, currentInstruction[2].u.specialPointer)),
- get(currentInstruction[1].u.operand));
- addToGraph(Jump, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jneq_ptr)));
- LAST_OPCODE(op_jneq_ptr);
- case op_get_scoped_var: {
- SpeculatedType prediction = getPrediction();
- int dst = currentInstruction[1].u.operand;
- int slot = currentInstruction[2].u.operand;
- int depth = currentInstruction[3].u.operand;
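- // If the function keeps a full scope chain, its own activation sits on top
- // of the chain, so one level of the bytecode's depth is consumed by it.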
- bool hasTopScope = m_codeBlock->codeType() == FunctionCode && m_inlineStackTop->m_codeBlock->needsFullScopeChain();
- ASSERT(!hasTopScope || depth >= 1);
- Node* scope = getScope(hasTopScope, depth - hasTopScope);
- Node* getScopeRegisters = addToGraph(GetScopeRegisters, scope);
- Node* getScopedVar = addToGraph(GetScopedVar, OpInfo(slot), OpInfo(prediction), getScopeRegisters);
- set(dst, getScopedVar);
- NEXT_OPCODE(op_get_scoped_var);
- }
- case op_put_scoped_var: {
- int slot = currentInstruction[1].u.operand;
- int depth = currentInstruction[2].u.operand;
- int source = currentInstruction[3].u.operand;
- bool hasTopScope = m_codeBlock->codeType() == FunctionCode && m_inlineStackTop->m_codeBlock->needsFullScopeChain();
- ASSERT(!hasTopScope || depth >= 1);
- Node* scope = getScope(hasTopScope, depth - hasTopScope);
- Node* scopeRegisters = addToGraph(GetScopeRegisters, scope);
- addToGraph(PutScopedVar, OpInfo(slot), scope, scopeRegisters, get(source));
- NEXT_OPCODE(op_put_scoped_var);
- }
- case op_resolve:
- case op_resolve_global_property:
- case op_resolve_global_var:
- case op_resolve_scoped_var:
- case op_resolve_scoped_var_on_top_scope:
- case op_resolve_scoped_var_with_top_scope_check: {
- SpeculatedType prediction = getPrediction();
-
- unsigned identifier = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
- ResolveOperations* operations = currentInstruction[3].u.resolveOperations;
- Node* value = 0;
- if (parseResolveOperations(prediction, identifier, operations, 0, 0, &value)) {
- set(currentInstruction[1].u.operand, value);
- NEXT_OPCODE(op_resolve);
- }
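- // The fast path failed; plant a generic Resolve node and record the resolve
- // operations so later phases can consult them.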
- Node* resolve = addToGraph(Resolve, OpInfo(m_graph.m_resolveOperationsData.size()), OpInfo(prediction));
- m_graph.m_resolveOperationsData.append(ResolveOperationData());
- ResolveOperationData& data = m_graph.m_resolveOperationsData.last();
- data.identifierNumber = identifier;
- data.resolveOperations = operations;
- set(currentInstruction[1].u.operand, resolve);
- NEXT_OPCODE(op_resolve);
- }
- case op_put_to_base_variable:
- case op_put_to_base: {
- unsigned base = currentInstruction[1].u.operand;
- unsigned identifier = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
- unsigned value = currentInstruction[3].u.operand;
- PutToBaseOperation* putToBase = currentInstruction[4].u.putToBaseOperation;
- if (putToBase->m_isDynamic) {
- addToGraph(PutById, OpInfo(identifier), get(base), get(value));
- NEXT_OPCODE(op_put_to_base);
- }
- switch (putToBase->m_kind) {
- case PutToBaseOperation::Uninitialised:
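- // Nothing is known about this put yet; force an exit to the baseline
- // if this code is ever reached.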
- addToGraph(ForceOSRExit);
- addToGraph(Phantom, get(base));
- addToGraph(Phantom, get(value));
- break;
- case PutToBaseOperation::GlobalVariablePutChecked: {
- CodeBlock* codeBlock = m_inlineStackTop->m_codeBlock;
- JSGlobalObject* globalObject = codeBlock->globalObject();
- SymbolTableEntry entry = globalObject->symbolTable()->get(m_codeBlock->identifier(identifier).impl());
- if (entry.couldBeWatched()) {
- addToGraph(PutGlobalVarCheck,
- OpInfo(codeBlock->globalObject()->assertRegisterIsInThisObject(putToBase->m_registerAddress)),
- OpInfo(identifier),
- get(value));
- break;
- }
- }
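- // Fall through to the unchecked put: the entry cannot be watched.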
- case PutToBaseOperation::GlobalVariablePut:
- addToGraph(PutGlobalVar,
- OpInfo(m_inlineStackTop->m_codeBlock->globalObject()->assertRegisterIsInThisObject(putToBase->m_registerAddress)),
- get(value));
- break;
- case PutToBaseOperation::VariablePut: {
- Node* scope = get(base);
- Node* scopeRegisters = addToGraph(GetScopeRegisters, scope);
- addToGraph(PutScopedVar, OpInfo(putToBase->m_offset), scope, scopeRegisters, get(value));
- break;
- }
- case PutToBaseOperation::GlobalPropertyPut: {
- if (!putToBase->m_structure) {
- addToGraph(ForceOSRExit);
- addToGraph(Phantom, get(base));
- addToGraph(Phantom, get(value));
- NEXT_OPCODE(op_put_to_base);
- }
- Node* baseNode = get(base);
- addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(putToBase->m_structure.get())), baseNode);
- Node* propertyStorage;
- if (isInlineOffset(putToBase->m_offset))
- propertyStorage = baseNode;
- else
- propertyStorage = addToGraph(GetButterfly, baseNode);
- addToGraph(PutByOffset, OpInfo(m_graph.m_storageAccessData.size()), propertyStorage, baseNode, get(value));
- StorageAccessData storageAccessData;
- storageAccessData.offset = indexRelativeToBase(putToBase->m_offset);
- storageAccessData.identifierNumber = identifier;
- m_graph.m_storageAccessData.append(storageAccessData);
- break;
- }
- case PutToBaseOperation::Readonly:
- case PutToBaseOperation::Generic:
- addToGraph(PutById, OpInfo(identifier), get(base), get(value));
- }
- NEXT_OPCODE(op_put_to_base);
- }
- case op_resolve_base_to_global:
- case op_resolve_base_to_global_dynamic:
- case op_resolve_base_to_scope:
- case op_resolve_base_to_scope_with_top_scope_check:
- case op_resolve_base: {
- SpeculatedType prediction = getPrediction();
-
- unsigned identifier = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
- ResolveOperations* operations = currentInstruction[4].u.resolveOperations;
- PutToBaseOperation* putToBaseOperation = currentInstruction[5].u.putToBaseOperation;
- Node* base = 0;
- if (parseResolveOperations(prediction, identifier, operations, 0, &base, 0)) {
- set(currentInstruction[1].u.operand, base);
- NEXT_OPCODE(op_resolve_base);
- }
- Node* resolve = addToGraph(currentInstruction[3].u.operand ? ResolveBaseStrictPut : ResolveBase, OpInfo(m_graph.m_resolveOperationsData.size()), OpInfo(prediction));
- m_graph.m_resolveOperationsData.append(ResolveOperationData());
- ResolveOperationData& data = m_graph.m_resolveOperationsData.last();
- data.identifierNumber = identifier;
- data.resolveOperations = operations;
- data.putToBaseOperation = putToBaseOperation;
-
- set(currentInstruction[1].u.operand, resolve);
- NEXT_OPCODE(op_resolve_base);
- }
- case op_resolve_with_base: {
- SpeculatedType prediction = getPrediction();
- unsigned baseDst = currentInstruction[1].u.operand;
- unsigned valueDst = currentInstruction[2].u.operand;
- unsigned identifier = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
- ResolveOperations* operations = currentInstruction[4].u.resolveOperations;
- PutToBaseOperation* putToBaseOperation = currentInstruction[5].u.putToBaseOperation;
- Node* base = 0;
- Node* value = 0;
- if (parseResolveOperations(prediction, identifier, operations, putToBaseOperation, &base, &value))
- setPair(baseDst, base, valueDst, value);
- else {
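- // We cannot resolve statically; force an exit and fill the destinations
- // with placeholder values.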
- addToGraph(ForceOSRExit);
- setPair(baseDst, addToGraph(GarbageValue), valueDst, addToGraph(GarbageValue));
- }
- NEXT_OPCODE(op_resolve_with_base);
- }
- case op_resolve_with_this: {
- SpeculatedType prediction = getPrediction();
- unsigned baseDst = currentInstruction[1].u.operand;
- unsigned valueDst = currentInstruction[2].u.operand;
- unsigned identifier = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
- ResolveOperations* operations = currentInstruction[4].u.resolveOperations;
- Node* base = 0;
- Node* value = 0;
- if (parseResolveOperations(prediction, identifier, operations, 0, &base, &value))
- setPair(baseDst, base, valueDst, value);
- else {
- addToGraph(ForceOSRExit);
- setPair(baseDst, addToGraph(GarbageValue), valueDst, addToGraph(GarbageValue));
- }
- NEXT_OPCODE(op_resolve_with_this);
- }
- case op_loop_hint: {
- // Baseline->DFG OSR jumps between loop hints. The DFG assumes that Baseline->DFG
- // OSR can only happen at basic block boundaries. Assert that these two statements
- // are compatible.
- RELEASE_ASSERT(m_currentIndex == blockBegin);
-
- // We never do OSR into an inlined code block. That could not happen, since OSR
- // looks up the code block that is the replacement for the baseline JIT code
- // block. Hence, machine code block = true code block = not inline code block.
- if (!m_inlineStackTop->m_caller)
- m_currentBlock->isOSRTarget = true;
- if (m_vm->watchdog.isEnabled())
- addToGraph(CheckWatchdogTimer);
- else {
- // Emit a phantom node to ensure that there is a placeholder
- // node for this bytecode op.
- addToGraph(Phantom);
- }
-
- NEXT_OPCODE(op_loop_hint);
- }
-
- case op_init_lazy_reg: {
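- // A lazily-initialized register starts out holding the empty JSValue.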
- set(currentInstruction[1].u.operand, getJSConstantForValue(JSValue()));
- NEXT_OPCODE(op_init_lazy_reg);
- }
-
- case op_create_activation: {
- set(currentInstruction[1].u.operand, addToGraph(CreateActivation, get(currentInstruction[1].u.operand)));
- NEXT_OPCODE(op_create_activation);
- }
-
- case op_create_arguments: {
- m_graph.m_hasArguments = true;
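- // Store the arguments object into both the named register and its
- // 'unmodified' shadow so tear-off can always find the original object.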
- Node* createArguments = addToGraph(CreateArguments, get(currentInstruction[1].u.operand));
- set(currentInstruction[1].u.operand, createArguments);
- set(unmodifiedArgumentsRegister(currentInstruction[1].u.operand), createArguments);
- NEXT_OPCODE(op_create_arguments);
- }
-
- case op_tear_off_activation: {
- addToGraph(TearOffActivation, get(currentInstruction[1].u.operand));
- NEXT_OPCODE(op_tear_off_activation);
- }
- case op_tear_off_arguments: {
- m_graph.m_hasArguments = true;
- addToGraph(TearOffArguments, get(unmodifiedArgumentsRegister(currentInstruction[1].u.operand)), get(currentInstruction[2].u.operand));
- NEXT_OPCODE(op_tear_off_arguments);
- }
-
- case op_get_arguments_length: {
- m_graph.m_hasArguments = true;
- set(currentInstruction[1].u.operand, addToGraph(GetMyArgumentsLengthSafe));
- NEXT_OPCODE(op_get_arguments_length);
- }
-
- case op_get_argument_by_val: {
- m_graph.m_hasArguments = true;
- set(currentInstruction[1].u.operand,
- addToGraph(
- GetMyArgumentByValSafe, OpInfo(0), OpInfo(getPrediction()),
- get(currentInstruction[3].u.operand)));
- NEXT_OPCODE(op_get_argument_by_val);
- }
-
- case op_new_func: {
- if (!currentInstruction[3].u.operand) {
- set(currentInstruction[1].u.operand,
- addToGraph(NewFunctionNoCheck, OpInfo(currentInstruction[2].u.operand)));
- } else {
- set(currentInstruction[1].u.operand,
- addToGraph(
- NewFunction,
- OpInfo(currentInstruction[2].u.operand),
- get(currentInstruction[1].u.operand)));
- }
- NEXT_OPCODE(op_new_func);
- }
-
- case op_new_func_exp: {
- set(currentInstruction[1].u.operand,
- addToGraph(NewFunctionExpression, OpInfo(currentInstruction[2].u.operand)));
- NEXT_OPCODE(op_new_func_exp);
- }
- case op_typeof: {
- set(currentInstruction[1].u.operand,
- addToGraph(TypeOf, get(currentInstruction[2].u.operand)));
- NEXT_OPCODE(op_typeof);
- }
- case op_to_number: {
- set(currentInstruction[1].u.operand,
- addToGraph(Identity, Edge(get(currentInstruction[2].u.operand), NumberUse)));
- NEXT_OPCODE(op_to_number);
- }
- default:
- // Parse failed! This should not happen because the capabilities checker
- // should have caught it.
- RELEASE_ASSERT_NOT_REACHED();
- return false;
- }
- }
- }
- 
- void ByteCodeParser::linkBlock(BasicBlock* block, Vector<BlockIndex>& possibleTargets)
- {
- ASSERT(!block->isLinked);
- ASSERT(!block->isEmpty());
- Node* node = block->last();
- ASSERT(node->isTerminal());
-
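- // Now that all blocks exist, translate the bytecode offsets recorded on the
- // terminal node into actual block indices.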
- switch (node->op()) {
- case Jump:
- node->setTakenBlockIndex(m_graph.blockIndexForBytecodeOffset(possibleTargets, node->takenBytecodeOffsetDuringParsing()));
- #if DFG_ENABLE(DEBUG_VERBOSE)
- dataLogF("Linked basic block %p to %p, #%u.\n", block, m_graph.m_blocks[node->takenBlockIndex()].get(), node->takenBlockIndex());
- #endif
- break;
-
- case Branch:
- node->setTakenBlockIndex(m_graph.blockIndexForBytecodeOffset(possibleTargets, node->takenBytecodeOffsetDuringParsing()));
- node->setNotTakenBlockIndex(m_graph.blockIndexForBytecodeOffset(possibleTargets, node->notTakenBytecodeOffsetDuringParsing()));
- #if DFG_ENABLE(DEBUG_VERBOSE)
- dataLogF("Linked basic block %p to %p, #%u and %p, #%u.\n", block, m_graph.m_blocks[node->takenBlockIndex()].get(), node->takenBlockIndex(), m_graph.m_blocks[node->notTakenBlockIndex()].get(), node->notTakenBlockIndex());
- #endif
- break;
-
- default:
- #if DFG_ENABLE(DEBUG_VERBOSE)
- dataLogF("Marking basic block %p as linked.\n", block);
- #endif
- break;
- }
-
- #if !ASSERT_DISABLED
- block->isLinked = true;
- #endif
- }
- 
- void ByteCodeParser::linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BlockIndex>& possibleTargets)
- {
- for (size_t i = 0; i < unlinkedBlocks.size(); ++i) {
- if (unlinkedBlocks[i].m_needsNormalLinking) {
- linkBlock(m_graph.m_blocks[unlinkedBlocks[i].m_blockIndex].get(), possibleTargets);
- unlinkedBlocks[i].m_needsNormalLinking = false;
- }
- }
- }
- 
- void ByteCodeParser::buildOperandMapsIfNecessary()
- {
- if (m_haveBuiltOperandMaps)
- return;
-
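- // Index every identifier and constant of the machine code block so that
- // inlined code blocks can be remapped into the same tables.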
- for (size_t i = 0; i < m_codeBlock->numberOfIdentifiers(); ++i)
- m_identifierMap.add(m_codeBlock->identifier(i).impl(), i);
- for (size_t i = 0; i < m_codeBlock->numberOfConstantRegisters(); ++i) {
- JSValue value = m_codeBlock->getConstant(i + FirstConstantRegisterIndex);
- if (!value)
- m_emptyJSValueIndex = i + FirstConstantRegisterIndex;
- else
- m_jsValueMap.add(JSValue::encode(value), i + FirstConstantRegisterIndex);
- }
-
- m_haveBuiltOperandMaps = true;
- }
- 
- ByteCodeParser::InlineStackEntry::InlineStackEntry(
- ByteCodeParser* byteCodeParser,
- CodeBlock* codeBlock,
- CodeBlock* profiledBlock,
- BlockIndex callsiteBlockHead,
- JSFunction* callee, // Null if this is a closure call.
- VirtualRegister returnValueVR,
- VirtualRegister inlineCallFrameStart,
- int argumentCountIncludingThis,
- CodeSpecializationKind kind)
- : m_byteCodeParser(byteCodeParser)
- , m_codeBlock(codeBlock)
- , m_profiledBlock(profiledBlock)
- , m_exitProfile(profiledBlock->exitProfile())
- , m_callsiteBlockHead(callsiteBlockHead)
- , m_returnValue(returnValueVR)
- , m_lazyOperands(profiledBlock->lazyOperandValueProfiles())
- , m_didReturn(false)
- , m_didEarlyReturn(false)
- , m_caller(byteCodeParser->m_inlineStackTop)
- {
- m_argumentPositions.resize(argumentCountIncludingThis);
- for (int i = 0; i < argumentCountIncludingThis; ++i) {
- byteCodeParser->m_graph.m_argumentPositions.append(ArgumentPosition());
- ArgumentPosition* argumentPosition = &byteCodeParser->m_graph.m_argumentPositions.last();
- m_argumentPositions[i] = argumentPosition;
- }
-
- // Track the code-block-global exit sites.
- if (m_exitProfile.hasExitSite(ArgumentsEscaped)) {
- byteCodeParser->m_graph.m_executablesWhoseArgumentsEscaped.add(
- codeBlock->ownerExecutable());
- }
-
- if (m_caller) {
- // Inline case.
- ASSERT(codeBlock != byteCodeParser->m_codeBlock);
- ASSERT(inlineCallFrameStart != InvalidVirtualRegister);
- ASSERT(callsiteBlockHead != NoBlock);
-
- InlineCallFrame inlineCallFrame;
- inlineCallFrame.executable.set(*byteCodeParser->m_vm, byteCodeParser->m_codeBlock->ownerExecutable(), codeBlock->ownerExecutable());
- inlineCallFrame.stackOffset = inlineCallFrameStart + JSStack::CallFrameHeaderSize;
- if (callee)
- inlineCallFrame.callee.set(*byteCodeParser->m_vm, byteCodeParser->m_codeBlock->ownerExecutable(), callee);
- inlineCallFrame.caller = byteCodeParser->currentCodeOrigin();
- inlineCallFrame.arguments.resize(argumentCountIncludingThis); // Set the number of arguments including this, but don't configure the value recoveries, yet.
- inlineCallFrame.isCall = isCall(kind);
-
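- // Compute the set of captured variables for this frame. Locals are inherited
- // from the caller frame where possible; arguments and locals of the inlined
- // block are translated into machine frame offsets via stackOffset.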
- if (inlineCallFrame.caller.inlineCallFrame)
- inlineCallFrame.capturedVars = inlineCallFrame.caller.inlineCallFrame->capturedVars;
- else {
- for (int i = byteCodeParser->m_codeBlock->m_numVars; i--;) {
- if (byteCodeParser->m_codeBlock->isCaptured(i))
- inlineCallFrame.capturedVars.set(i);
- }
- }
- for (int i = argumentCountIncludingThis; i--;) {
- if (codeBlock->isCaptured(argumentToOperand(i)))
- inlineCallFrame.capturedVars.set(argumentToOperand(i) + inlineCallFrame.stackOffset);
- }
- for (size_t i = codeBlock->m_numVars; i--;) {
- if (codeBlock->isCaptured(i))
- inlineCallFrame.capturedVars.set(i + inlineCallFrame.stackOffset);
- }
- #if DFG_ENABLE(DEBUG_VERBOSE)
- dataLogF("Current captured variables: ");
- inlineCallFrame.capturedVars.dump(WTF::dataFile());
- dataLogF("\n");
- #endif
-
- byteCodeParser->m_codeBlock->inlineCallFrames().append(inlineCallFrame);
- m_inlineCallFrame = &byteCodeParser->m_codeBlock->inlineCallFrames().last();
-
- byteCodeParser->buildOperandMapsIfNecessary();
-
- m_identifierRemap.resize(codeBlock->numberOfIdentifiers());
- m_constantRemap.resize(codeBlock->numberOfConstantRegisters());
- m_constantBufferRemap.resize(codeBlock->numberOfConstantBuffers());
- for (size_t i = 0; i < codeBlock->numberOfIdentifiers(); ++i) {
- StringImpl* rep = codeBlock->identifier(i).impl();
- IdentifierMap::AddResult result = byteCodeParser->m_identifierMap.add(rep, byteCodeParser->m_codeBlock->numberOfIdentifiers());
- if (result.isNewEntry)
- byteCodeParser->m_codeBlock->addIdentifier(Identifier(byteCodeParser->m_vm, rep));
- m_identifierRemap[i] = result.iterator->value;
- }
- for (size_t i = 0; i < codeBlock->numberOfConstantRegisters(); ++i) {
- JSValue value = codeBlock->getConstant(i + FirstConstantRegisterIndex);
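- // The empty JSValue cannot be used as a hash key, so it gets a dedicated
- // constant slot tracked by m_emptyJSValueIndex.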
- if (!value) {
- if (byteCodeParser->m_emptyJSValueIndex == UINT_MAX) {
- byteCodeParser->m_emptyJSValueIndex = byteCodeParser->m_codeBlock->numberOfConstantRegisters() + FirstConstantRegisterIndex;
- byteCodeParser->m_codeBlock->addConstant(JSValue());
- byteCodeParser->m_constants.append(ConstantRecord());
- }
- m_constantRemap[i] = byteCodeParser->m_emptyJSValueIndex;
- continue;
- }
- JSValueMap::AddResult result = byteCodeParser->m_jsValueMap.add(JSValue::encode(value), byteCodeParser->m_codeBlock->numberOfConstantRegisters() + FirstConstantRegisterIndex);
- if (result.isNewEntry) {
- byteCodeParser->m_codeBlock->addConstant(value);
- byteCodeParser->m_constants.append(ConstantRecord());
- }
- m_constantRemap[i] = result.iterator->value;
- }
- for (unsigned i = 0; i < codeBlock->numberOfConstantBuffers(); ++i) {
- // If we inline the same code block multiple times, we don't want to needlessly
- // duplicate its constant buffers.
- HashMap<ConstantBufferKey, unsigned>::iterator iter =
- byteCodeParser->m_constantBufferCache.find(ConstantBufferKey(codeBlock, i));
- if (iter != byteCodeParser->m_constantBufferCache.end()) {
- m_constantBufferRemap[i] = iter->value;
- continue;
- }
- Vector_shared<JSValue>& buffer = codeBlock->constantBufferAsVector(i);
- unsigned newIndex = byteCodeParser->m_codeBlock->addConstantBuffer(buffer);
- m_constantBufferRemap[i] = newIndex;
- byteCodeParser->m_constantBufferCache.add(ConstantBufferKey(codeBlock, i), newIndex);
- }
- m_callsiteBlockHeadNeedsLinking = true;
- } else {
- // Machine code block case.
- ASSERT(codeBlock == byteCodeParser->m_codeBlock);
- ASSERT(!callee);
- ASSERT(returnValueVR == InvalidVirtualRegister);
- ASSERT(inlineCallFrameStart == InvalidVirtualRegister);
- ASSERT(callsiteBlockHead == NoBlock);
- m_inlineCallFrame = 0;
- m_identifierRemap.resize(codeBlock->numberOfIdentifiers());
- m_constantRemap.resize(codeBlock->numberOfConstantRegisters());
- m_constantBufferRemap.resize(codeBlock->numberOfConstantBuffers());
- for (size_t i = 0; i < codeBlock->numberOfIdentifiers(); ++i)
- m_identifierRemap[i] = i;
- for (size_t i = 0; i < codeBlock->numberOfConstantRegisters(); ++i)
- m_constantRemap[i] = i + FirstConstantRegisterIndex;
- for (size_t i = 0; i < codeBlock->numberOfConstantBuffers(); ++i)
- m_constantBufferRemap[i] = i;
- m_callsiteBlockHeadNeedsLinking = false;
- }
-
- for (size_t i = 0; i < m_constantRemap.size(); ++i)
- ASSERT(m_constantRemap[i] >= static_cast<unsigned>(FirstConstantRegisterIndex));
-
- byteCodeParser->m_inlineStackTop = this;
- }
- 
- void ByteCodeParser::parseCodeBlock()
- {
- CodeBlock* codeBlock = m_inlineStackTop->m_codeBlock;
-
- #if ENABLE(DETACHED_JIT)
- #pragma message "[SECURE JSCORE] profiling disabled"
- #else
- if (m_graph.m_compilation) {
- m_graph.m_compilation->addProfiledBytecodes(
- *m_vm->m_perBytecodeProfiler, m_inlineStackTop->m_profiledBlock);
- }
- #endif
- bool shouldDumpBytecode = Options::dumpBytecodeAtDFGTime();
- #if DFG_ENABLE(DEBUG_VERBOSE)
- shouldDumpBytecode |= true;
- #endif
- if (shouldDumpBytecode) {
- dataLog("Parsing ", *codeBlock);
- if (inlineCallFrame()) {
- dataLog(
- " for inlining at ", CodeBlockWithJITType(m_codeBlock, JITCode::DFGJIT),
- " ", inlineCallFrame()->caller);
- }
- dataLog(
- ": captureCount = ", codeBlock->symbolTable() ? codeBlock->symbolTable()->captureCount() : 0,
- ", needsFullScopeChain = ", codeBlock->needsFullScopeChain(),
- ", needsActivation = ", codeBlock->ownerExecutable()->needsActivation(),
- ", isStrictMode = ", codeBlock->ownerExecutable()->isStrictMode(), "\n");
- codeBlock->baselineVersion()->dumpBytecode();
- }
-
- Vector<unsigned, 32> jumpTargets;
- computePreciseJumpTargets(codeBlock, jumpTargets);
- if (Options::dumpBytecodeAtDFGTime()) {
- dataLog("Jump targets: ");
- CommaPrinter comma;
- for (unsigned i = 0; i < jumpTargets.size(); ++i)
- dataLog(comma, jumpTargets[i]);
- dataLog("\n");
- }
-
- for (unsigned jumpTargetIndex = 0; jumpTargetIndex <= jumpTargets.size(); ++jumpTargetIndex) {
- // The maximum bytecode offset to go into the current basic block is either the next jump target, or the end of the instructions.
- unsigned limit = jumpTargetIndex < jumpTargets.size() ? jumpTargets[jumpTargetIndex] : codeBlock->instructions().size();
- #if DFG_ENABLE(DEBUG_VERBOSE)
- dataLog(
- "Parsing bytecode with limit ", pointerDump(inlineCallFrame()),
- " bc#", limit, " at inline depth ",
- CodeOrigin::inlineDepthForCallFrame(inlineCallFrame()), ".\n");
- #endif
- ASSERT(m_currentIndex < limit);
- // Loop until we reach the current limit (i.e. next jump target).
- do {
- if (!m_currentBlock) {
- // Check if we can use the last block.
- if (!m_graph.m_blocks.isEmpty() && m_graph.m_blocks.last()->isEmpty()) {
- // This must be a block belonging to us.
- ASSERT(m_inlineStackTop->m_unlinkedBlocks.last().m_blockIndex == m_graph.m_blocks.size() - 1);
- // Either the block is linkable or it isn't. If it's linkable then it's the last
- // block in the blockLinkingTargets list. If it's not then the last block will
- // have a lower bytecode index than the one we're about to give to this block.
- if (m_inlineStackTop->m_blockLinkingTargets.isEmpty() || m_graph.m_blocks[m_inlineStackTop->m_blockLinkingTargets.last()]->bytecodeBegin != m_currentIndex) {
- // Make the block linkable.
- ASSERT(m_inlineStackTop->m_blockLinkingTargets.isEmpty() || m_graph.m_blocks[m_inlineStackTop->m_blockLinkingTargets.last()]->bytecodeBegin < m_currentIndex);
- m_inlineStackTop->m_blockLinkingTargets.append(m_graph.m_blocks.size() - 1);
- }
- // Change its bytecode begin and continue.
- m_currentBlock = m_graph.m_blocks.last().get();
- #if DFG_ENABLE(DEBUG_VERBOSE)
- dataLogF("Reascribing bytecode index of block %p from bc#%u to bc#%u (peephole case).\n", m_currentBlock, m_currentBlock->bytecodeBegin, m_currentIndex);
- #endif
- m_currentBlock->bytecodeBegin = m_currentIndex;
- } else {
- OwnPtr<BasicBlock> block = adoptPtr(new BasicBlock(m_currentIndex, m_numArguments, m_numLocals));
- #if DFG_ENABLE(DEBUG_VERBOSE)
- dataLogF("Creating basic block %p, #%zu for %p bc#%u at inline depth %u.\n", block.get(), m_graph.m_blocks.size(), m_inlineStackTop->executable(), m_currentIndex, CodeOrigin::inlineDepthForCallFrame(inlineCallFrame()));
- #endif
- m_currentBlock = block.get();
- // This assertion checks two things:
- // 1) If the bytecodeBegin is greater than currentIndex, then something has gone
- // horribly wrong. So, we're probably generating incorrect code.
- // 2) If the bytecodeBegin is equal to the currentIndex, then we failed to do
- // a peephole coalescing of this block in the if statement above. So, we're
- // generating suboptimal code and leaving more work for the CFG simplifier.
- ASSERT(m_inlineStackTop->m_unlinkedBlocks.isEmpty() || m_graph.m_blocks[m_inlineStackTop->m_unlinkedBlocks.last().m_blockIndex]->bytecodeBegin < m_currentIndex);
- m_inlineStackTop->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.m_blocks.size()));
- m_inlineStackTop->m_blockLinkingTargets.append(m_graph.m_blocks.size());
- // The first block is definitely an OSR target.
- if (!m_graph.m_blocks.size())
- block->isOSRTarget = true;
- m_graph.m_blocks.append(block.release());
- prepareToParseBlock();
- }
- }
- bool shouldContinueParsing = parseBlock(limit);
- // We should not have gone beyond the limit.
- ASSERT(m_currentIndex <= limit);
-
- // We should have planted a terminal, or we just gave up because
- // we realized that the jump target information is imprecise, or we
- // are at the end of an inline function, or we realized that we
- // should stop parsing because there was a return in the first
- // basic block.
- ASSERT(m_currentBlock->isEmpty() || m_currentBlock->last()->isTerminal() || (m_currentIndex == codeBlock->instructions().size() && inlineCallFrame()) || !shouldContinueParsing);
- if (!shouldContinueParsing)
- return;
-
- m_currentBlock = 0;
- } while (m_currentIndex < limit);
- }
- // Should have reached the end of the instructions.
- ASSERT(m_currentIndex == codeBlock->instructions().size());
- }
- 
- bool ByteCodeParser::parse()
- {
- // Set during construction.
- ASSERT(!m_currentIndex);
-
- #if DFG_ENABLE(ALL_VARIABLES_CAPTURED)
- // We should be pretending that the code has an activation.
- ASSERT(m_graph.needsActivation());
- #endif
-
- InlineStackEntry inlineStackEntry(
- this, m_codeBlock, m_profiledBlock, NoBlock, 0, InvalidVirtualRegister, InvalidVirtualRegister,
- m_codeBlock->numParameters(), CodeForCall);
-
- parseCodeBlock();
- linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
- m_graph.determineReachability();
-
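- // Find the highest local that must be preserved; every reachable block needs
- // at least that many locals in its variable tables.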
- ASSERT(m_preservedVars.size());
- size_t numberOfLocals = 0;
- for (size_t i = m_preservedVars.size(); i--;) {
- if (m_preservedVars.quickGet(i)) {
- numberOfLocals = i + 1;
- break;
- }
- }
-
- for (BlockIndex blockIndex = 0; blockIndex < m_graph.m_blocks.size(); ++blockIndex) {
- BasicBlock* block = m_graph.m_blocks[blockIndex].get();
- ASSERT(block);
- if (!block->isReachable) {
- m_graph.m_blocks[blockIndex].clear();
- continue;
- }
-
- block->variablesAtHead.ensureLocals(numberOfLocals);
- block->variablesAtTail.ensureLocals(numberOfLocals);
- }
-
- m_graph.m_preservedVars = m_preservedVars;
- m_graph.m_localVars = m_numLocals;
- m_graph.m_parameterSlots = m_parameterSlots;
- return true;
- }
- 
- bool parse(ExecState*, Graph& graph)
- {
- SamplingRegion samplingRegion("DFG Parsing");
- #if DFG_DEBUG_LOCAL_DISBALE
- UNUSED_PARAM(exec);
- UNUSED_PARAM(graph);
- return false;
- #else
- #if OS(ORBIS)
- static int enable_dfg(-1);
- if (enable_dfg == -1) {
- char* enable_dfg_setting(getenv("JSCORE_ENABLE_DFG"));
- enable_dfg = !enable_dfg_setting ? 0 : // if not set, disable
- atoi(enable_dfg_setting) <= 0 ? 0 : // if value <= 0, disable
- 1;
- fprintf(stderr, "[JavaScriptCore] DFG_JIT %s\n", enable_dfg ? "enabled" : "disabled");
- }
- if (!enable_dfg) {
- return false;
- }
- #endif
- return ByteCodeParser(graph).parse();
- #endif
- }
- 
- } } // namespace JSC::DFG
- #endif