void treatRootBlock(BasicBlock* block, InsertionSet& insertionSet)
    {
        Operands<VariableAccessData*> initialAccessData(block->variablesAtTail.numberOfArguments(), block->variablesAtTail.numberOfLocals(), nullptr);
        Operands<Node*> initialAccessNodes(block->variablesAtTail.numberOfArguments(), block->variablesAtTail.numberOfLocals(), nullptr);
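        // Find, for each operand, the first node in the block that accesses it.
        // Its VariableAccessData is reused below when we insert the SetLocals.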
        for (unsigned i = 0; i < block->size(); i++) {
            Node* node = block->at(i);
            if (!node->hasVariableAccessData(m_graph))
                continue;

            VirtualRegister operand = node->local();
            if (initialAccessData.operand(operand))
                continue;

            DFG_ASSERT(m_graph, node, node->op() != SetLocal); // We should have inserted a Flush before this!
            initialAccessData.operand(operand) = node->variableAccessData();
            initialAccessNodes.operand(operand) = node;
        }

        // We want every Flush to be able to reach backwards to a SetLocal.
        // Inserting a SetLocal of undefined for each local at the top of the
        // root block achieves this goal.
        NodeOrigin origin = block->at(0)->origin;
        Node* undefined = insertionSet.insertConstant(0, origin, jsUndefined());

        for (unsigned i = 0; i < block->variablesAtTail.numberOfLocals(); i++) {
            VirtualRegister operand = virtualRegisterForLocal(i);
            VariableAccessData* accessData;
            DFG_ASSERT(m_graph, nullptr, initialAccessNodes.operand(operand)->op() == Flush); // We should have inserted a Flush before any SetLocal/SetArgument for the local that we are analyzing now.
            accessData = initialAccessData.operand(operand);
            DFG_ASSERT(m_graph, nullptr, accessData);
            insertionSet.insertNode(0, SpecNone, 
                SetLocal, origin, OpInfo(accessData), Edge(undefined));
        }
    }
Node* emitCodeToGetArgumentsArrayLength(
    InsertionSet& insertionSet, Node* arguments, unsigned nodeIndex, NodeOrigin origin)
{
    Graph& graph = insertionSet.graph();

    DFG_ASSERT(
        graph, arguments,
        arguments->op() == CreateDirectArguments || arguments->op() == CreateScopedArguments
        || arguments->op() == CreateClonedArguments || arguments->op() == PhantomDirectArguments
        || arguments->op() == PhantomClonedArguments);

    InlineCallFrame* inlineCallFrame = arguments->origin.semantic.inlineCallFrame;

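    // For a non-varargs inline frame the argument count is known statically;
    // arguments.size() includes |this|, hence the -1.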
    if (inlineCallFrame && !inlineCallFrame->isVarargs()) {
        return insertionSet.insertConstant(
            nodeIndex, origin, jsNumber(inlineCallFrame->arguments.size() - 1));
    }

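    // Otherwise load the count dynamically: from the machine frame's argument
    // count, or from the inline frame's count slot on the stack.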
    Node* argumentCount;
    if (!inlineCallFrame)
        argumentCount = insertionSet.insertNode(nodeIndex, SpecInt32, GetArgumentCount, origin);
    else {
        VirtualRegister argumentCountRegister(inlineCallFrame->stackOffset + JSStack::ArgumentCount);

        argumentCount = insertionSet.insertNode(
            nodeIndex, SpecInt32, GetStack, origin,
            OpInfo(graph.m_stackAccessData.add(argumentCountRegister, FlushedInt32)));
    }

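    // The dynamic count includes |this|, so subtract 1. Unchecked arithmetic is
    // fine here since the count is always at least 1.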
    return insertionSet.insertNode(
        nodeIndex, SpecInt32, ArithSub, origin, OpInfo(Arith::Unchecked),
        Edge(argumentCount, Int32Use),
        insertionSet.insertConstantForUse(
            nodeIndex, origin, jsNumber(1), Int32Use));
}
Example #3
OSRExit::OSRExit(ExitKind kind, JSValueSource jsValueSource, MethodOfGettingAValueProfile valueProfile, SpeculativeJIT* jit, unsigned streamIndex, unsigned recoveryIndex)
    : OSRExitBase(kind, jit->m_origin.forExit, jit->m_origin.semantic)
    , m_jsValueSource(jsValueSource)
    , m_valueProfile(valueProfile)
    , m_patchableCodeOffset(0)
    , m_recoveryIndex(recoveryIndex)
    , m_streamIndex(streamIndex)
{
    DFG_ASSERT(jit->m_jit.graph(), jit->m_currentNode, jit->m_origin.exitOK);
}
Example #4
Node* emitCodeToGetArgumentsArrayLength(
    InsertionSet& insertionSet, Node* arguments, unsigned nodeIndex, NodeOrigin origin)
{
    Graph& graph = insertionSet.graph();

    DFG_ASSERT(
        graph, arguments,
        arguments->op() == CreateDirectArguments || arguments->op() == CreateScopedArguments
        || arguments->op() == CreateClonedArguments || arguments->op() == CreateRest
        || arguments->op() == PhantomDirectArguments || arguments->op() == PhantomClonedArguments || arguments->op() == PhantomCreateRest);
    
    InlineCallFrame* inlineCallFrame = arguments->origin.semantic.inlineCallFrame;

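    // Rest parameter creation skips some number of leading arguments; account
    // for that below when computing the length.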
    unsigned numberOfArgumentsToSkip = 0;
    if (arguments->op() == CreateRest || arguments->op() == PhantomCreateRest)
        numberOfArgumentsToSkip = arguments->numberOfArgumentsToSkip();
    
    if (inlineCallFrame && !inlineCallFrame->isVarargs()) {
        unsigned argumentsSize = inlineCallFrame->arguments.size() - 1;
        if (argumentsSize >= numberOfArgumentsToSkip)
            argumentsSize -= numberOfArgumentsToSkip;
        else
            argumentsSize = 0;
        return insertionSet.insertConstant(
            nodeIndex, origin, jsNumber(argumentsSize));
    }
    
    Node* argumentCount;
    if (!inlineCallFrame)
        argumentCount = insertionSet.insertNode(nodeIndex, SpecInt32Only, GetArgumentCountIncludingThis, origin);
    else {
        VirtualRegister argumentCountRegister(inlineCallFrame->stackOffset + CallFrameSlot::argumentCount);
        
        argumentCount = insertionSet.insertNode(
            nodeIndex, SpecInt32Only, GetStack, origin,
            OpInfo(graph.m_stackAccessData.add(argumentCountRegister, FlushedInt32)));
    }

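    // The frame's count includes |this|, so subtract 1 plus however many
    // arguments the rest parameter skips.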
    Node* result = insertionSet.insertNode(
        nodeIndex, SpecInt32Only, ArithSub, origin, OpInfo(Arith::Unchecked),
        Edge(argumentCount, Int32Use),
        insertionSet.insertConstantForUse(
            nodeIndex, origin, jsNumber(1 + numberOfArgumentsToSkip), Int32Use));

    if (numberOfArgumentsToSkip) {
        // The above subtraction may produce a negative number when
        // numberOfArgumentsToSkip is non-zero. We clamp the result to zero here.
        result = insertionSet.insertNode(
            nodeIndex, SpecInt32Only, ArithMax, origin, 
            Edge(result, Int32Use), 
            insertionSet.insertConstantForUse(nodeIndex, origin, jsNumber(0), Int32Use));
        result->setResult(NodeResultInt32);
    }

    return result;
}
 bool run()
 {
     m_graph.m_dominators.computeIfNecessary(m_graph);
     m_graph.m_naturalLoops.computeIfNecessary(m_graph);
     
     for (unsigned loopIndex = m_graph.m_naturalLoops.numLoops(); loopIndex--;) {
         const NaturalLoop& loop = m_graph.m_naturalLoops.loop(loopIndex);
          BasicBlock* existingPreHeader = nullptr;
         bool needsNewPreHeader = false;
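          // A pre-header candidate is any predecessor that the header does not
          // dominate, i.e. an entry edge into the loop rather than a back edge.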
         for (unsigned predecessorIndex = loop.header()->predecessors.size(); predecessorIndex--;) {
             BasicBlock* predecessor = loop.header()->predecessors[predecessorIndex];
             if (m_graph.m_dominators.dominates(loop.header(), predecessor))
                 continue;
             if (!existingPreHeader) {
                 existingPreHeader = predecessor;
                 continue;
             }
             // We won't have duplicate entries in the predecessors list.
             DFG_ASSERT(m_graph, nullptr, existingPreHeader != predecessor);
             needsNewPreHeader = true;
             break;
         }
         
         // This phase should only be run on a DFG where unreachable blocks have been pruned.
         // We also don't allow loops back to root. This means that every loop header has got
         // to have a pre-header.
         DFG_ASSERT(m_graph, nullptr, existingPreHeader);
         
         // We are looking at the predecessors of a loop header. A loop header has to have
         // some predecessor other than the pre-header. We must have broken critical edges
         // because that is the DFG SSA convention. Therefore, each predecessor of the loop
         // header must have only one successor.
         DFG_ASSERT(m_graph, nullptr, existingPreHeader->terminal()->op() == Jump);
         
         if (!needsNewPreHeader)
             continue;
         
         createPreHeader(m_graph, m_insertionSet, loop.header());
     }
     
     return m_insertionSet.execute();
 }
    bool run()
    {
        DFG_ASSERT(m_graph, nullptr, m_graph.m_form == LoadStore);

        InsertionSet insertionSet(m_graph);
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            treatRegularBlock(block, insertionSet);
            insertionSet.execute(block);
        }

        treatRootBlock(m_graph.block(0), insertionSet);
        insertionSet.execute(m_graph.block(0));

        return true;
    }
Example #7
OSRExit::OSRExit(ExitKind kind, JSValueSource jsValueSource, MethodOfGettingAValueProfile valueProfile, SpeculativeJIT* jit, unsigned streamIndex, unsigned recoveryIndex)
    : OSRExitBase(kind, jit->m_origin.forExit, jit->m_origin.semantic)
    , m_jsValueSource(jsValueSource)
    , m_valueProfile(valueProfile)
    , m_patchableCodeOffset(0)
    , m_recoveryIndex(recoveryIndex)
    , m_streamIndex(streamIndex)
    , m_willArriveAtOSRExitFromGenericUnwind(false)
{
    bool canExit = jit->m_origin.exitOK;
    if (!canExit && jit->m_currentNode) {
        ExitMode exitMode = mayExit(jit->m_jit.graph(), jit->m_currentNode);
        canExit = exitMode == ExitMode::Exits || exitMode == ExitMode::ExitsForExceptions;
    }
    DFG_ASSERT(jit->m_jit.graph(), jit->m_currentNode, canExit);
}
    bool run()
    {
        DFG_ASSERT(m_graph, nullptr, m_graph.m_form == LoadStore);

        if (!m_graph.m_hasExceptionHandlers)
            return false;

        InsertionSet insertionSet(m_graph);
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            handleBlockForTryCatch(block, insertionSet);
            insertionSet.execute(block);
        }

        return true;
    }
Example #9
    bool run()
    {
        DFG_ASSERT(m_graph, nullptr, m_graph.m_form == SSA);
        
        m_graph.ensureDominators();
        m_graph.ensureNaturalLoops();
        m_graph.ensureControlEquivalenceAnalysis();

        if (verbose) {
            dataLog("Graph before LICM:\n");
            m_graph.dump();
        }
        
        m_data.resize(m_graph.m_naturalLoops->numLoops());
        
        // Figure out the set of things each loop writes to, not including blocks that
        // belong to inner loops. We fix this later.
        for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
            BasicBlock* block = m_graph.block(blockIndex);
            if (!block)
                continue;
            
            // Skip blocks that are proved to not execute.
            // FIXME: This shouldn't be needed.
            // https://bugs.webkit.org/show_bug.cgi?id=128584
            if (!block->cfaHasVisited)
                continue;
            
            const NaturalLoop* loop = m_graph.m_naturalLoops->innerMostLoopOf(block);
            if (!loop)
                continue;
            LoopData& data = m_data[loop->index()];
            for (auto* node : *block) {
                // Don't look beyond parts of the code that definitely always exit.
                // FIXME: This shouldn't be needed.
                // https://bugs.webkit.org/show_bug.cgi?id=128584
                if (node->op() == ForceOSRExit)
                    break;

                addWrites(m_graph, node, data.writes);
            }
        }
        
        // For each loop:
        // - Identify its pre-header.
        // - Make sure its outer loops know what it clobbers.
        for (unsigned loopIndex = m_graph.m_naturalLoops->numLoops(); loopIndex--;) {
            const NaturalLoop& loop = m_graph.m_naturalLoops->loop(loopIndex);
            LoopData& data = m_data[loop.index()];
            
            for (
                const NaturalLoop* outerLoop = m_graph.m_naturalLoops->innerMostOuterLoop(loop);
                outerLoop;
                outerLoop = m_graph.m_naturalLoops->innerMostOuterLoop(*outerLoop))
                m_data[outerLoop->index()].writes.addAll(data.writes);
            
            BasicBlock* header = loop.header();
            BasicBlock* preHeader = nullptr;
            unsigned numberOfPreHeaders = 0; // We're cool if this is 1.

            // This is guaranteed because we expect the CFG not to have unreachable code. Therefore, a
            // loop header must have a predecessor. (Also, we don't allow the root block to be a loop,
            // which cuts out the one other way of having a loop header with only one predecessor.)
            DFG_ASSERT(m_graph, header->at(0), header->predecessors.size() > 1);
            
            for (unsigned i = header->predecessors.size(); i--;) {
                BasicBlock* predecessor = header->predecessors[i];
                if (m_graph.m_dominators->dominates(header, predecessor))
                    continue;

                preHeader = predecessor;
                ++numberOfPreHeaders;
            }

            // We need to validate the pre-header. There are a bunch of things that could be wrong
            // about it:
            //
            // - There might be more than one. This means that pre-header creation either did not run,
            //   or some CFG transformation destroyed the pre-headers.
            //
            // - It may not be legal to exit at the pre-header. That would be a real bummer. Currently,
            //   LICM assumes that it can always hoist checks. See
            //   https://bugs.webkit.org/show_bug.cgi?id=148545. Though even with that fixed, we anyway
            //   would need to check if it's OK to exit at the pre-header since if we can't then we
            //   would have to restrict hoisting to non-exiting nodes.

            if (numberOfPreHeaders != 1)
                continue;

            // This is guaranteed because the header has multiple predecessors and critical edges are
            // broken. Therefore the predecessors must all have one successor, which implies that they
            // must end in a Jump.
            DFG_ASSERT(m_graph, preHeader->terminal(), preHeader->terminal()->op() == Jump);

            if (!preHeader->terminal()->origin.exitOK)
                continue;
            
            data.preHeader = preHeader;
        }
        
        m_graph.initializeNodeOwners();
        
        // Walk all basic blocks that belong to loops, looking for hoisting opportunities.
        // We try to hoist to the outer-most loop that permits it. Hoisting is valid if:
        // - The node doesn't write anything.
        // - The node doesn't read anything that the loop writes.
        // - The preHeader is valid (i.e. it passed the validation above).
        // - The preHeader's state at tail makes the node safe to execute.
        // - The loop's children all belong to nodes that strictly dominate the loop header.
        // - The preHeader's state at tail is still valid. This is mostly to save compile
        //   time and preserve some kind of sanity, if we hoist something that must exit.
        //
        // Also, we need to remember to:
        // - Update the state-at-tail with the node we hoisted, so future hoist candidates
        //   know about any type checks we hoisted.
        //
        // For maximum profit, we walk blocks in DFS order to ensure that we generally
        // tend to hoist dominators before dominatees.
        Vector<const NaturalLoop*> loopStack;
        bool changed = false;
        for (BasicBlock* block : m_graph.blocksInPreOrder()) {
            const NaturalLoop* loop = m_graph.m_naturalLoops->innerMostLoopOf(block);
            if (!loop)
                continue;
            
            loopStack.resize(0);
            for (
                const NaturalLoop* current = loop;
                current;
                current = m_graph.m_naturalLoops->innerMostOuterLoop(*current))
                loopStack.append(current);
            
            // Remember: the loop stack has the inner-most loop at index 0, so if we want
            // to bias hoisting to outer loops then we need to use a reverse loop.
            
            if (verbose) {
                dataLog(
                    "Attempting to hoist out of block ", *block, " in loops:\n");
                for (unsigned stackIndex = loopStack.size(); stackIndex--;) {
                    dataLog(
                        "        ", *loopStack[stackIndex], ", which writes ",
                        m_data[loopStack[stackIndex]->index()].writes, "\n");
                }
            }
            
            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node*& nodeRef = block->at(nodeIndex);
                if (doesWrites(m_graph, nodeRef)) {
                    if (verbose)
                        dataLog("    Not hoisting ", nodeRef, " because it writes things.\n");
                    continue;
                }

                for (unsigned stackIndex = loopStack.size(); stackIndex--;)
                    changed |= attemptHoist(block, nodeRef, loopStack[stackIndex]);
            }
        }
        
        return changed;
    }
Example #10
    bool attemptHoist(BasicBlock* fromBlock, Node*& nodeRef, const NaturalLoop* loop)
    {
        Node* node = nodeRef;
        LoopData& data = m_data[loop->index()];

        if (!data.preHeader) {
            if (verbose)
                dataLog("    Not hoisting ", node, " because the pre-header is invalid.\n");
            return false;
        }
        
        if (!data.preHeader->cfaDidFinish) {
            if (verbose)
                dataLog("    Not hoisting ", node, " because CFA is invalid.\n");
            return false;
        }
        
        if (!edgesDominate(m_graph, node, data.preHeader)) {
            if (verbose) {
                dataLog(
                    "    Not hoisting ", node, " because it isn't loop invariant.\n");
            }
            return false;
        }
        
        // FIXME: At this point if the hoisting of the full node fails but the node has type checks,
        // we could still hoist just the checks.
        // https://bugs.webkit.org/show_bug.cgi?id=144525
        
        if (readsOverlap(m_graph, node, data.writes)) {
            if (verbose) {
                dataLog(
                    "    Not hoisting ", node,
                    " because it reads things that the loop writes.\n");
            }
            return false;
        }
        
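        // Check the node against the abstract state at the pre-header's tail: if
        // its speculations wouldn't be safe there, we cannot hoist.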
        m_state.initializeTo(data.preHeader);
        if (!safeToExecute(m_state, m_graph, node)) {
            if (verbose) {
                dataLog(
                    "    Not hoisting ", node, " because it isn't safe to execute.\n");
            }
            return false;
        }
        
        NodeOrigin originalOrigin = node->origin;

        // NOTE: We could just use BackwardsDominators here directly, since we already know that the
        // preHeader dominates fromBlock. But we wouldn't get anything from being so clever, since
        // dominance checks are O(1) and only a few integer compares.
        bool addsBlindSpeculation = mayExit(m_graph, node, m_state)
            && !m_graph.m_controlEquivalenceAnalysis->dominatesEquivalently(data.preHeader, fromBlock);
        
        if (addsBlindSpeculation
            && m_graph.baselineCodeBlockFor(originalOrigin.semantic)->hasExitSite(FrequentExitSite(HoistingFailed))) {
            if (verbose) {
                dataLog(
                    "    Not hoisting ", node, " because it may exit and the pre-header (",
                    *data.preHeader, ") is not control equivalent to the node's original block (",
                    *fromBlock, ") and hoisting had previously failed.\n");
            }
            return false;
        }
        
        if (verbose) {
            dataLog(
                "    Hoisting ", node, " from ", *fromBlock, " to ", *data.preHeader,
                "\n");
        }

        // FIXME: We should adjust the Check: flags on the edges of node. There are phases that assume
        // that those flags are correct even if AI is stale.
        // https://bugs.webkit.org/show_bug.cgi?id=148544
        data.preHeader->insertBeforeTerminal(node);
        node->owner = data.preHeader;
        NodeOrigin terminalOrigin = data.preHeader->terminal()->origin;
        node->origin = terminalOrigin.withSemantic(node->origin.semantic);
        node->origin.wasHoisted |= addsBlindSpeculation;
        
        // Modify the states at the end of the preHeader of the loop we hoisted to,
        // and all pre-headers inside the loop. This isn't a scalability bottleneck
        // right now because most loops are small and most blocks belong to few loops.
        for (unsigned bodyIndex = loop->size(); bodyIndex--;) {
            BasicBlock* subBlock = loop->at(bodyIndex);
            const NaturalLoop* subLoop = m_graph.m_naturalLoops->headerOf(subBlock);
            if (!subLoop)
                continue;
            BasicBlock* subPreHeader = m_data[subLoop->index()].preHeader;
            // We may not have given this loop a pre-header because either it didn't have exitOK
            // or the header had multiple predecessors that it did not dominate. In that case the
            // loop wouldn't be a hoisting candidate anyway, so we don't have to do anything.
            if (!subPreHeader)
                continue;
            // The pre-header's tail may be unreachable, in which case we have nothing to do.
            if (!subPreHeader->cfaDidFinish)
                continue;
            m_state.initializeTo(subPreHeader);
            m_interpreter.execute(node);
        }
        
        // It just so happens that all of the nodes we currently know how to hoist
        // don't have var-arg children. That may change and then we can fix this
        // code. But for now we just assert that's the case.
        DFG_ASSERT(m_graph, node, !(node->flags() & NodeHasVarArgs));
        
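        // Replace the node at its original position with a Check over the same
        // children, so any type checks on those edges still happen here.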
        nodeRef = m_graph.addNode(Check, originalOrigin, node->children);
        
        return true;
    }
    bool run()
    {
        DFG_ASSERT(m_graph, nullptr, m_graph.m_form == ThreadedCPS);
        
        ScoreBoard scoreBoard(m_graph.m_nextMachineLocal);
        scoreBoard.assertClear();
        for (size_t blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
            BasicBlock* block = m_graph.block(blockIndex);
            if (!block)
                continue;
            if (!block->isReachable)
                continue;
            if (!ASSERT_DISABLED) {
                // Force usage of highest-numbered virtual registers.
                scoreBoard.sortFree();
            }
            for (size_t indexInBlock = 0; indexInBlock < block->size(); ++indexInBlock) {
                Node* node = block->at(indexInBlock);
        
                if (!node->shouldGenerate())
                    continue;
                
                switch (node->op()) {
                case Phi:
                case Flush:
                case PhantomLocal:
                    continue;
                case GetLocal:
                    ASSERT(!node->child1()->hasResult());
                    break;
                default:
                    break;
                }
                
                // First, call use on all of the current node's children, then
                // allocate a VirtualRegister for this node. We do so in this
                // order so that if a child is on its last use, and a
                // VirtualRegister is freed, then it may be reused for node.
                if (node->flags() & NodeHasVarArgs) {
                    for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++)
                        scoreBoard.useIfHasResult(m_graph.m_varArgChildren[childIdx]);
                } else {
                    scoreBoard.useIfHasResult(node->child1());
                    scoreBoard.useIfHasResult(node->child2());
                    scoreBoard.useIfHasResult(node->child3());
                }

                if (!node->hasResult())
                    continue;

                VirtualRegister virtualRegister = scoreBoard.allocate();
                node->setVirtualRegister(virtualRegister);
                // 'mustGenerate' nodes have their useCount artificially elevated;
                // call use() now to account for this.
                if (node->mustGenerate())
                    scoreBoard.use(node);
            }
            scoreBoard.assertClear();
        }
        
        // Record the number of virtual registers we're using. This is used by calls
        // to figure out where to put the parameters.
        m_graph.m_nextMachineLocal = scoreBoard.highWatermark();

        return true;
    }
Example #12
    bool attemptHoist(BasicBlock* fromBlock, Node*& nodeRef, const NaturalLoop* loop)
    {
        Node* node = nodeRef;
        LoopData& data = m_data[loop->index()];

        if (!data.preHeader) {
            if (verbose)
                dataLog("    Not hoisting ", node, " because the pre-header is invalid.\n");
            return false;
        }
        
        if (!data.preHeader->cfaDidFinish) {
            if (verbose)
                dataLog("    Not hoisting ", node, " because CFA is invalid.\n");
            return false;
        }
        
        if (!edgesDominate(m_graph, node, data.preHeader)) {
            if (verbose) {
                dataLog(
                    "    Not hoisting ", node, " because it isn't loop invariant.\n");
            }
            return false;
        }
        
        // FIXME: At this point if the hoisting of the full node fails but the node has type checks,
        // we could still hoist just the checks.
        // https://bugs.webkit.org/show_bug.cgi?id=144525
        
        // FIXME: If a node has a type check - even something like a CheckStructure - then we should
        // only hoist the node if we know that it will execute on every loop iteration or if we know
        // that the type check will always succeed at the loop pre-header through some other means
        // (like looking at prediction propagation results). Otherwise, we might make a mistake like
        // this:
        //
        // var o = ...; // sometimes null and sometimes an object with structure S1.
        // for (...) {
        //     if (o)
        //         ... = o.f; // CheckStructure and GetByOffset, which we will currently hoist.
        // }
        //
        // When we encounter such code, we'll hoist the CheckStructure and GetByOffset and then we
        // will have a recompile. We'll then end up thinking that the get_by_id needs to be
        // polymorphic, which is false.
        //
        // We can counter this by either having a control flow equivalence check, or by consulting
        // prediction propagation to see if the check would always succeed. Prediction propagation
        // would not be enough for things like:
        //
        // var p = ...; // some boolean predicate
        // var o = {};
        // if (p)
        //     o.f = 42;
        // for (...) {
        //     if (p)
        //         ... = o.f;
        // }
        //
        // Prediction propagation can't tell us anything about the structure, and the CheckStructure
        // will appear to be hoistable because the loop doesn't clobber structures. The cell check
        // in the CheckStructure will be hoistable though, since prediction propagation can tell us
        // that o is always SpecFinalObject. In cases like this, control flow equivalence is the
        // only effective guard.
        //
        // https://bugs.webkit.org/show_bug.cgi?id=144527
        
        if (readsOverlap(m_graph, node, data.writes)) {
            if (verbose) {
                dataLog(
                    "    Not hoisting ", node,
                    " because it reads things that the loop writes.\n");
            }
            return false;
        }
        
        m_state.initializeTo(data.preHeader);
        if (!safeToExecute(m_state, m_graph, node)) {
            if (verbose) {
                dataLog(
                    "    Not hoisting ", node, " because it isn't safe to execute.\n");
            }
            return false;
        }
        
        if (verbose) {
            dataLog(
                "    Hoisting ", node, " from ", *fromBlock, " to ", *data.preHeader,
                "\n");
        }

        // FIXME: We should adjust the Check: flags on the edges of node. There are phases that assume
        // that those flags are correct even if AI is stale.
        // https://bugs.webkit.org/show_bug.cgi?id=148544
        data.preHeader->insertBeforeTerminal(node);
        node->owner = data.preHeader;
        NodeOrigin originalOrigin = node->origin;
        node->origin = data.preHeader->terminal()->origin.withSemantic(node->origin.semantic);
        
        // Modify the states at the end of the preHeader of the loop we hoisted to,
        // and all pre-headers inside the loop. This isn't a scalability bottleneck
        // right now because most loops are small and most blocks belong to few loops.
        for (unsigned bodyIndex = loop->size(); bodyIndex--;) {
            BasicBlock* subBlock = loop->at(bodyIndex);
            const NaturalLoop* subLoop = m_graph.m_naturalLoops.headerOf(subBlock);
            if (!subLoop)
                continue;
            BasicBlock* subPreHeader = m_data[subLoop->index()].preHeader;
            if (!subPreHeader->cfaDidFinish)
                continue;
            m_state.initializeTo(subPreHeader);
            m_interpreter.execute(node);
        }
        
        // It just so happens that all of the nodes we currently know how to hoist
        // don't have var-arg children. That may change and then we can fix this
        // code. But for now we just assert that's the case.
        DFG_ASSERT(m_graph, node, !(node->flags() & NodeHasVarArgs));
        
        nodeRef = m_graph.addNode(SpecNone, Check, originalOrigin, node->children);
        
        return true;
    }
    void handleNode()
    {
        switch (m_node->op()) {
        case BitOr:
            handleCommutativity();

            if (m_node->child2()->isInt32Constant() && !m_node->child2()->asInt32()) {
                convertToIdentityOverChild1();
                break;
            }
            break;

        case BitXor:
        case BitAnd:
            handleCommutativity();
            break;

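        // Shift amounts are masked to five bits, so shifting by a multiple of 32
        // is the identity.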
        case BitLShift:
        case BitRShift:
        case BitURShift:
            if (m_node->child2()->isInt32Constant() && !(m_node->child2()->asInt32() & 0x1f)) {
                convertToIdentityOverChild1();
                break;
            }
            break;

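        // x >>> k for non-zero k always fits in a non-negative int32, so the
        // UInt32ToNumber is a no-op.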
        case UInt32ToNumber:
            if (m_node->child1()->op() == BitURShift
                    && m_node->child1()->child2()->isInt32Constant()
                    && (m_node->child1()->child2()->asInt32() & 0x1f)
                    && m_node->arithMode() != Arith::DoOverflow) {
                m_node->convertToIdentity();
                m_changed = true;
                break;
            }
            break;

        case ArithAdd:
            handleCommutativity();

            if (m_node->child2()->isInt32Constant() && !m_node->child2()->asInt32()) {
                convertToIdentityOverChild1();
                break;
            }
            break;

        case ArithMul:
            handleCommutativity();
            break;

        case ArithSub:
            if (m_node->child2()->isInt32Constant()
                    && m_node->isBinaryUseKind(Int32Use)) {
                int32_t value = m_node->child2()->asInt32();
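                // Negating INT32_MIN overflows back to itself, so leave that case
                // alone.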
                if (-value != value) {
                    m_node->setOp(ArithAdd);
                    m_node->child2().setNode(
                        m_insertionSet.insertConstant(
                            m_nodeIndex, m_node->origin, jsNumber(-value)));
                    m_changed = true;
                    break;
                }
            }
            break;

        case GetArrayLength:
            if (JSArrayBufferView* view = m_graph.tryGetFoldableViewForChild1(m_node))
                foldTypedArrayPropertyToConstant(view, jsNumber(view->length()));
            break;

        case GetTypedArrayByteOffset:
            if (JSArrayBufferView* view = m_graph.tryGetFoldableView(m_node->child1().node()))
                foldTypedArrayPropertyToConstant(view, jsNumber(view->byteOffset()));
            break;

        case GetIndexedPropertyStorage:
            if (JSArrayBufferView* view = m_graph.tryGetFoldableViewForChild1(m_node)) {
                if (view->mode() != FastTypedArray) {
                    prepareToFoldTypedArray(view);
                    m_node->convertToConstantStoragePointer(view->vector());
                    m_changed = true;
                    break;
                } else {
                    // FIXME: It would be awesome to be able to fold the property storage for
                    // these GC-allocated typed arrays. For now it doesn't matter because the
                    // most common use-cases for constant typed arrays involve large arrays with
                    // aliased buffer views.
                    // https://bugs.webkit.org/show_bug.cgi?id=125425
                }
            }
            break;

        case ValueRep:
        case Int52Rep:
        case DoubleRep: {
            // This short-circuits circuitous conversions, like ValueRep(DoubleRep(value)) or
            // even more complicated things. Like, it can handle a beast like
            // ValueRep(DoubleRep(Int52Rep(value))).

            // The only speculation that we would do beyond validating that we have a type that
            // can be represented in a certain way is an Int32 check that would appear on Int52Rep
            // nodes. For now, if we see this and the final type we want is an Int52, we use it
            // as an excuse not to fold. The only thing we would need is an Int52RepInt32Use kind.
            bool hadInt32Check = false;
            if (m_node->op() == Int52Rep) {
                if (m_node->child1().useKind() != Int32Use)
                    break;
                hadInt32Check = true;
            }
            for (Node* node = m_node->child1().node(); ; node = node->child1().node()) {
                if (canonicalResultRepresentation(node->result()) ==
                        canonicalResultRepresentation(m_node->result())) {
                    m_insertionSet.insertNode(
                        m_nodeIndex, SpecNone, Phantom, m_node->origin, m_node->child1());
                    if (hadInt32Check) {
                        // FIXME: Consider adding Int52RepInt32Use or even DoubleRepInt32Use,
                        // which would be super weird. The latter would only arise in some
                        // seriously circuitous conversions.
                        if (canonicalResultRepresentation(node->result()) != NodeResultJS)
                            break;

                        m_insertionSet.insertNode(
                            m_nodeIndex, SpecNone, Phantom, m_node->origin,
                            Edge(node, Int32Use));
                    }
                    m_node->child1() = node->defaultEdge();
                    m_node->convertToIdentity();
                    m_changed = true;
                    break;
                }

                switch (node->op()) {
                case Int52Rep:
                    if (node->child1().useKind() != Int32Use)
                        break;
                    hadInt32Check = true;
                    continue;

                case DoubleRep:
                case ValueRep:
                    continue;

                default:
                    break;
                }
                break;
            }
            break;
        }

        case Flush: {
            ASSERT(m_graph.m_form != SSA);

            Node* setLocal = nullptr;
            VirtualRegister local = m_node->local();

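            // Search backwards for the SetLocal this Flush would observe. If we
            // find one with nothing clobbering the local in between, the Flush is
            // redundant and can become a Phantom of the stored value.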
            if (m_node->variableAccessData()->isCaptured()) {
                for (unsigned i = m_nodeIndex; i--;) {
                    Node* node = m_block->at(i);
                    bool done = false;
                    switch (node->op()) {
                    case GetLocal:
                    case Flush:
                        if (node->local() == local)
                            done = true;
                        break;

                    case GetLocalUnlinked:
                        if (node->unlinkedLocal() == local)
                            done = true;
                        break;

                    case SetLocal: {
                        if (node->local() != local)
                            break;
                        setLocal = node;
                        done = true;
                        break;
                    }

                    case Phantom:
                    case Check:
                    case HardPhantom:
                    case MovHint:
                    case JSConstant:
                    case DoubleConstant:
                    case Int52Constant:
                        break;

                    default:
                        done = true;
                        break;
                    }
                    if (done)
                        break;
                }
            } else {
                for (unsigned i = m_nodeIndex; i--;) {
                    Node* node = m_block->at(i);
                    if (node->op() == SetLocal && node->local() == local) {
                        setLocal = node;
                        break;
                    }
                    if (accessesOverlap(m_graph, node, AbstractHeap(Variables, local)))
                        break;
                }
            }

            if (!setLocal)
                break;

            m_node->convertToPhantom();
            Node* dataNode = setLocal->child1().node();
            DFG_ASSERT(m_graph, m_node, dataNode->hasResult());
            m_node->child1() = dataNode->defaultEdge();
            m_graph.dethread();
            m_changed = true;
            break;
        }

        default:
            break;
        }
    }
Example #14
    bool run()
    {
        DFG_ASSERT(m_graph, nullptr, m_graph.m_form == SSA);
        
        m_graph.m_dominators.computeIfNecessary(m_graph);
        m_graph.m_naturalLoops.computeIfNecessary(m_graph);
        
        m_data.resize(m_graph.m_naturalLoops.numLoops());
        
        // Figure out the set of things each loop writes to, not including blocks that
        // belong to inner loops. We fix this later.
        for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
            BasicBlock* block = m_graph.block(blockIndex);
            if (!block)
                continue;
            
            // Skip blocks that are proved to not execute.
            // FIXME: This shouldn't be needed.
            // https://bugs.webkit.org/show_bug.cgi?id=128584
            if (!block->cfaHasVisited)
                continue;
            
            const NaturalLoop* loop = m_graph.m_naturalLoops.innerMostLoopOf(block);
            if (!loop)
                continue;
            LoopData& data = m_data[loop->index()];
            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);
                
                // Don't look beyond parts of the code that definitely always exit.
                // FIXME: This shouldn't be needed.
                // https://bugs.webkit.org/show_bug.cgi?id=128584
                if (node->op() == ForceOSRExit)
                    break;

                addWrites(m_graph, node, data.writes);
            }
        }
        
        // For each loop:
        // - Identify its pre-header.
        // - Make sure its outer loops know what it clobbers.
        for (unsigned loopIndex = m_graph.m_naturalLoops.numLoops(); loopIndex--;) {
            const NaturalLoop& loop = m_graph.m_naturalLoops.loop(loopIndex);
            LoopData& data = m_data[loop.index()];
            for (
                const NaturalLoop* outerLoop = m_graph.m_naturalLoops.innerMostOuterLoop(loop);
                outerLoop;
                outerLoop = m_graph.m_naturalLoops.innerMostOuterLoop(*outerLoop))
                m_data[outerLoop->index()].writes.addAll(data.writes);
            
            BasicBlock* header = loop.header();
            BasicBlock* preHeader = nullptr;
            for (unsigned i = header->predecessors.size(); i--;) {
                BasicBlock* predecessor = header->predecessors[i];
                if (m_graph.m_dominators.dominates(header, predecessor))
                    continue;
                DFG_ASSERT(m_graph, nullptr, !preHeader || preHeader == predecessor);
                preHeader = predecessor;
            }
            
            DFG_ASSERT(m_graph, preHeader->terminal(), preHeader->terminal()->op() == Jump);
            
            // We should validate the pre-header. If we placed forExit origins on nodes only if
            // at the top of that node it is legal to exit, then we would simply check if Jump
            // had a forExit. We should disable hoisting to pre-headers that don't validate.
            // Or, we could only allow hoisting of things that definitely don't exit.
            // FIXME: https://bugs.webkit.org/show_bug.cgi?id=145204
            
            data.preHeader = preHeader;
        }
        
        m_graph.initializeNodeOwners();
        
        // Walk all basic blocks that belong to loops, looking for hoisting opportunities.
        // We try to hoist to the outer-most loop that permits it. Hoisting is valid if:
        // - The node doesn't write anything.
        // - The node doesn't read anything that the loop writes.
        // - The preHeader's state at tail makes the node safe to execute.
        // - The loop's children all belong to nodes that strictly dominate the loop header.
        // - The preHeader's state at tail is still valid. This is mostly to save compile
        //   time and preserve some kind of sanity, if we hoist something that must exit.
        //
        // Also, we need to remember to:
        // - Update the state-at-tail with the node we hoisted, so future hoist candidates
        //   know about any type checks we hoisted.
        //
        // For maximum profit, we walk blocks in DFS order to ensure that we generally
        // tend to hoist dominators before dominatees.
        Vector<const NaturalLoop*> loopStack;
        bool changed = false;
        for (BasicBlock* block : m_graph.blocksInPreOrder()) {
            const NaturalLoop* loop = m_graph.m_naturalLoops.innerMostLoopOf(block);
            if (!loop)
                continue;
            
            loopStack.resize(0);
            for (
                const NaturalLoop* current = loop;
                current;
                current = m_graph.m_naturalLoops.innerMostOuterLoop(*current))
                loopStack.append(current);
            
            // Remember: the loop stack has the inner-most loop at index 0, so if we want
            // to bias hoisting to outer loops then we need to use a reverse loop.
            
            if (verbose) {
                dataLog(
                    "Attempting to hoist out of block ", *block, " in loops:\n");
                for (unsigned stackIndex = loopStack.size(); stackIndex--;) {
                    dataLog(
                        "        ", *loopStack[stackIndex], ", which writes ",
                        m_data[loopStack[stackIndex]->index()].writes, "\n");
                }
            }
            
            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node*& nodeRef = block->at(nodeIndex);
                if (doesWrites(m_graph, nodeRef)) {
                    if (verbose)
                        dataLog("    Not hoisting ", nodeRef, " because it writes things.\n");
                    continue;
                }

                for (unsigned stackIndex = loopStack.size(); stackIndex--;)
                    changed |= attemptHoist(block, nodeRef, loopStack[stackIndex]);
            }
        }
        
        return changed;
    }
    bool run()
    {
        // FIXME: We should make this work in SSA. https://bugs.webkit.org/show_bug.cgi?id=148260
        DFG_ASSERT(m_graph, nullptr, m_graph.m_form != SSA);
        
        const bool extremeLogging = false;

        bool outerChanged = false;
        bool innerChanged;
        
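        // Iterate to a fixed point, since one simplification can expose another.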
        do {
            innerChanged = false;
            for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
                BasicBlock* block = m_graph.block(blockIndex);
                if (!block)
                    continue;
                ASSERT(block->isReachable);
            
                switch (block->terminal()->op()) {
                case Jump: {
                    // Successor with one predecessor -> merge.
                    if (block->successor(0)->predecessors.size() == 1) {
                        ASSERT(block->successor(0)->predecessors[0] == block);
                        if (extremeLogging)
                            m_graph.dump();
                        m_graph.dethread();
                        mergeBlocks(block, block->successor(0), noBlocks());
                        innerChanged = outerChanged = true;
                        break;
                    }
                
                    // FIXME: Block only has a jump -> remove. This is tricky though because of
                    // liveness. What we really want is to slam in a phantom at the end of the
                    // block, after the terminal. But we can't right now. :-(
                    // Idea: what if I slam the ghosties into my successor? Nope, that's
                    // suboptimal, because if my successor has multiple predecessors then we'll
                    // be keeping alive things on other predecessor edges unnecessarily.
                    // What we really need is the notion of end-of-block ghosties!
                    // FIXME: Allow putting phantoms after terminals.
                    // https://bugs.webkit.org/show_bug.cgi?id=126778
                    break;
                }
                
                case Branch: {
                    // Branch on constant -> jettison the not-taken block and merge.
                    if (isKnownDirection(block->cfaBranchDirection)) {
                        bool condition = branchCondition(block->cfaBranchDirection);
                        BasicBlock* targetBlock = block->successorForCondition(condition);
                        BasicBlock* jettisonedBlock = block->successorForCondition(!condition);
                        if (targetBlock->predecessors.size() == 1) {
                            if (extremeLogging)
                                m_graph.dump();
                            m_graph.dethread();
                            mergeBlocks(block, targetBlock, oneBlock(jettisonedBlock));
                        } else {
                            if (extremeLogging)
                                m_graph.dump();
                            m_graph.dethread();
                            
                            Node* terminal = block->terminal();
                            ASSERT(terminal->isTerminal());
                            NodeOrigin boundaryNodeOrigin = terminal->origin;

                            jettisonBlock(block, jettisonedBlock, boundaryNodeOrigin);

                            block->replaceTerminal(
                                m_graph, SpecNone, Jump, boundaryNodeOrigin,
                                OpInfo(targetBlock));
                            
                            ASSERT(block->terminal());
                        
                        }
                        innerChanged = outerChanged = true;
                        break;
                    }
                    
                    // Branch to same destination -> jump.
                    // FIXME: this will currently not be hit because of the lack of jump-only
                    // block simplification.
                    if (block->successor(0) == block->successor(1)) {
                        convertToJump(block, block->successor(0));
                        innerChanged = outerChanged = true;
                        break;
                    }
                    
                    break;
                }
                    
                case Switch: {
                    SwitchData* data = block->terminal()->switchData();
                    
                    // Prune out cases that end up jumping to default.
                    for (unsigned i = 0; i < data->cases.size(); ++i) {
                        if (data->cases[i].target.block == data->fallThrough.block) {
                            data->fallThrough.count += data->cases[i].target.count;
                            data->cases[i--] = data->cases.last();
                            data->cases.removeLast();
                        }
                    }
                    
                    // If there are no cases other than default then this turns
                    // into a jump.
                    if (data->cases.isEmpty()) {
                        convertToJump(block, data->fallThrough.block);
                        innerChanged = outerChanged = true;
                        break;
                    }
                    
                    // Switch on constant -> jettison all other targets and merge.
                    Node* terminal = block->terminal();
                    if (terminal->child1()->hasConstant()) {
                        FrozenValue* value = terminal->child1()->constant();
                        TriState found = FalseTriState;
                        BasicBlock* targetBlock = nullptr;
                        for (unsigned i = data->cases.size(); found == FalseTriState && i--;) {
                            found = data->cases[i].value.strictEqual(value);
                            if (found == TrueTriState)
                                targetBlock = data->cases[i].target.block;
                        }
                        
                        if (found == MixedTriState)
                            break;
                        if (found == FalseTriState)
                            targetBlock = data->fallThrough.block;
                        ASSERT(targetBlock);
                        
                        Vector<BasicBlock*, 1> jettisonedBlocks;
                        for (BasicBlock* successor : terminal->successors()) {
                            if (successor != targetBlock)
                                jettisonedBlocks.append(successor);
                        }
                        
                        if (targetBlock->predecessors.size() == 1) {
                            if (extremeLogging)
                                m_graph.dump();
                            m_graph.dethread();
                            
                            mergeBlocks(block, targetBlock, jettisonedBlocks);
                        } else {
                            if (extremeLogging)
                                m_graph.dump();
                            m_graph.dethread();
                            
                            NodeOrigin boundaryNodeOrigin = terminal->origin;

                            for (unsigned i = jettisonedBlocks.size(); i--;)
                                jettisonBlock(block, jettisonedBlocks[i], boundaryNodeOrigin);
                            
                            block->replaceTerminal(
                                m_graph, SpecNone, Jump, boundaryNodeOrigin, OpInfo(targetBlock));
                        }
                        innerChanged = outerChanged = true;
                        break;
                    }
                    break;
                }
                    
                default:
                    break;
                }
            }
            
            if (innerChanged) {
                // Here's the reason for this pass:
                // Blocks: A, B, C, D, E, F
                // A -> B, C
                // B -> F
                // C -> D, E
                // D -> F
                // E -> F
                //
                // Assume that A's branch is determined to go to B. Then the rest of this phase
                // is smart enough to simplify down to:
                // A -> B
                // B -> F
                // C -> D, E
                // D -> F
                // E -> F
                //
                // We will also merge A and B. But then we don't have any other mechanism to
                // remove D, E as predecessors for F. Worse, the rest of this phase does not
                // know how to fix the Phi functions of F to ensure that they no longer refer
                // to variables in D, E. In general, we need a way to handle Phi simplification
                // upon:
                // 1) Removal of a predecessor due to branch simplification. The branch
                //    simplifier already does that.
                // 2) Invalidation of a predecessor because said predecessor was rendered
                //    unreachable. We do this here.
                //
                // This implies that when a block is unreachable, we must inspect its
                // successors' Phi functions to remove any references from them into the
                // removed block.
                
                m_graph.invalidateCFG();
                m_graph.resetReachability();
                m_graph.killUnreachableBlocks();
            }
            
            if (Options::validateGraphAtEachPhase())
                validate();
        } while (innerChanged);
        
        return outerChanged;
    }