Example No. 1
UnlinkedCodeBlock::UnlinkedCodeBlock(VM* vm, Structure* structure, CodeType codeType, const ExecutableInfo& info)
    : Base(*vm, structure)
    , m_numVars(0)
    , m_numCalleeRegisters(0)
    , m_numParameters(0)
    , m_vm(vm)
    , m_argumentsRegister(VirtualRegister())
    , m_globalObjectRegister(VirtualRegister())
    , m_needsFullScopeChain(info.m_needsActivation)
    , m_usesEval(info.m_usesEval)
    , m_isNumericCompareFunction(false)
    , m_isStrictMode(info.m_isStrictMode)
    , m_isConstructor(info.m_isConstructor)
    , m_hasCapturedVariables(false)
    , m_isBuiltinFunction(info.m_isBuiltinFunction)
    , m_firstLine(0)
    , m_lineCount(0)
    , m_endColumn(UINT_MAX)
    , m_features(0)
    , m_codeType(codeType)
    , m_arrayProfileCount(0)
    , m_arrayAllocationProfileCount(0)
    , m_objectAllocationProfileCount(0)
    , m_valueProfileCount(0)
    , m_llintCallLinkInfoCount(0)
#if ENABLE(BYTECODE_COMMENTS)
    , m_bytecodeCommentIterator(0)
#endif
{
}
void LocalOSRAvailabilityCalculator::executeNode(Node* node)
{
    switch (node->op()) {
    case PutLocal: {
        VariableAccessData* variable = node->variableAccessData();
        m_availability.m_locals.operand(variable->local()).setFlush(variable->flushedAt());
        break;
    }
        
    case KillLocal: {
        m_availability.m_locals.operand(node->unlinkedLocal()).setFlush(FlushedAt(ConflictingFlush));
        break;
    }

    case GetLocal: {
        VariableAccessData* variable = node->variableAccessData();
        m_availability.m_locals.operand(variable->local()) =
            Availability(node, variable->flushedAt());
        break;
    }

    case MovHint: {
        m_availability.m_locals.operand(node->unlinkedLocal()).setNode(node->child1().node());
        break;
    }

    case ZombieHint: {
        m_availability.m_locals.operand(node->unlinkedLocal()).setNodeUnavailable();
        break;
    }
        
    case LoadVarargs: {
        LoadVarargsData* data = node->loadVarargsData();
        m_availability.m_locals.operand(data->count) =
            Availability(FlushedAt(FlushedInt32, data->machineCount));
        for (unsigned i = data->limit; i--;) {
            m_availability.m_locals.operand(VirtualRegister(data->start.offset() + i)) =
                Availability(FlushedAt(FlushedJSValue, VirtualRegister(data->machineStart.offset() + i)));
        }
        break;
    }
        
    default:
        break;
    }
    
    promoteHeapAccess(
        node,
        [&] (PromotedHeapLocation location, Edge value) {
            m_availability.m_heap.set(location, Availability(value.node()));
        },
        [&] (PromotedHeapLocation) { });
}
FastBitVector BytecodeLivenessAnalysis::getLivenessInfoAtBytecodeOffset(unsigned bytecodeOffset)
{
    FastBitVector temp;
    FastBitVector result;

    getLivenessInfoForNonCapturedVarsAtBytecodeOffset(bytecodeOffset, temp);

    unsigned numCapturedVars = numberOfCapturedVariables(m_codeBlock);
    if (numCapturedVars) {
        int firstCapturedLocal = VirtualRegister(captureStart(m_codeBlock)).toLocal();
        result.resize(temp.numBits() + numCapturedVars);
        for (unsigned i = 0; i < numCapturedVars; ++i)
            result.set(firstCapturedLocal + i);
    } else
        result.resize(temp.numBits());

    int tempLength = temp.numBits();
    ASSERT(tempLength >= 0);
    for (int i = 0; i < tempLength; i++) {
        if (!temp.get(i))
            continue;

        if (!numCapturedVars) {
            result.set(i);
            continue;
        }

        if (virtualRegisterForLocal(i).offset() > captureStart(m_codeBlock))
            result.set(i);
        else 
            result.set(numCapturedVars + i);
    }
    return result;
}
void MethodOfGettingAValueProfile::reportValue(JSValue value)
{
    switch (m_kind) {
    case None:
        return;

    case Ready:
        *u.profile->specFailBucket(0) = JSValue::encode(value);
        return;

    case LazyOperand: {
        LazyOperandValueProfileKey key(u.lazyOperand.bytecodeOffset, VirtualRegister(u.lazyOperand.operand));

        ConcurrentJSLocker locker(u.lazyOperand.codeBlock->m_lock);
        LazyOperandValueProfile* profile =
            u.lazyOperand.codeBlock->lazyOperandValueProfiles(locker).add(locker, key);
        *profile->specFailBucket(0) = JSValue::encode(value);
        return;
    }

    case ArithProfileReady: {
        u.arithProfile->observeResult(value);
        return;
    }
    }

    RELEASE_ASSERT_NOT_REACHED();
}
void MethodOfGettingAValueProfile::emitReportValue(CCallHelpers& jit, JSValueRegs regs) const
{
    switch (m_kind) {
    case None:
        return;
        
    case Ready:
        jit.storeValue(regs, u.profile->specFailBucket(0));
        return;
        
    case LazyOperand: {
        LazyOperandValueProfileKey key(u.lazyOperand.bytecodeOffset, VirtualRegister(u.lazyOperand.operand));
        
        ConcurrentJSLocker locker(u.lazyOperand.codeBlock->m_lock);
        LazyOperandValueProfile* profile =
            u.lazyOperand.codeBlock->lazyOperandValueProfiles(locker).add(locker, key);
        jit.storeValue(regs, profile->specFailBucket(0));
        return;
    }
        
    case ArithProfileReady: {
        u.arithProfile->emitObserveResult(jit, regs, DoNotHaveTagRegisters);
        return;
    }
    }
    
    RELEASE_ASSERT_NOT_REACHED();
}
Example No. 6
UnlinkedCodeBlock::UnlinkedCodeBlock(VM* vm, Structure* structure, CodeType codeType, const ExecutableInfo& info, DebuggerMode debuggerMode)
    : Base(*vm, structure)
    , m_numVars(0)
    , m_numCalleeLocals(0)
    , m_numParameters(0)
    , m_globalObjectRegister(VirtualRegister())
    , m_usesEval(info.usesEval())
    , m_isStrictMode(info.isStrictMode())
    , m_isConstructor(info.isConstructor())
    , m_hasCapturedVariables(false)
    , m_isBuiltinFunction(info.isBuiltinFunction())
    , m_constructorKind(static_cast<unsigned>(info.constructorKind()))
    , m_superBinding(static_cast<unsigned>(info.superBinding()))
    , m_derivedContextType(static_cast<unsigned>(info.derivedContextType()))
    , m_evalContextType(static_cast<unsigned>(info.evalContextType()))
    , m_isArrowFunctionContext(info.isArrowFunctionContext())
    , m_isClassContext(info.isClassContext())
    , m_wasCompiledWithDebuggingOpcodes(debuggerMode == DebuggerMode::DebuggerOn || Options::forceDebuggerBytecodeGeneration())
    , m_firstLine(0)
    , m_lineCount(0)
    , m_endColumn(UINT_MAX)
    , m_parseMode(info.parseMode())
    , m_features(0)
    , m_codeType(codeType)
    , m_arrayProfileCount(0)
    , m_arrayAllocationProfileCount(0)
    , m_objectAllocationProfileCount(0)
    , m_valueProfileCount(0)
    , m_llintCallLinkInfoCount(0)
{
    for (auto& constantRegisterIndex : m_linkTimeConstants)
        constantRegisterIndex = 0;
    ASSERT(m_constructorKind == static_cast<unsigned>(info.constructorKind()));
}
template<typename UseFunctor, typename DefFunctor>
static void stepOverInstruction(CodeBlock* codeBlock, BytecodeBasicBlock* block, Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks, unsigned bytecodeOffset, const UseFunctor& use, const DefFunctor& def)
{
    // This abstractly executes the instruction in reverse. Instructions logically first use operands and
    // then define operands. This logical ordering is necessary for operations that use and def the same
    // operand, like:
    //
    //     op_add loc1, loc1, loc2
    //
    // The use of loc1 happens before the def of loc1. That's a semantic requirement since the add
    // operation cannot travel forward in time to read the value that it will produce after reading that
    // value. Since we are executing in reverse, this means that we must do defs before uses (reverse of
    // uses before defs).
    //
    // Since this is a liveness analysis, this ordering ends up being particularly important: if we did
    // uses before defs, then the add operation above would appear to not have loc1 live, since we'd
    // first add it to the out set (the use), and then we'd remove it (the def).
    
    computeDefsForBytecodeOffset(
        codeBlock, block, bytecodeOffset,
        [&] (CodeBlock* codeBlock, Instruction*, OpcodeID, int operand) {
            if (isValidRegisterForLiveness(codeBlock, operand))
                def(VirtualRegister(operand).toLocal());
        });

    computeUsesForBytecodeOffset(
        codeBlock, block, bytecodeOffset,
        [&] (CodeBlock* codeBlock, Instruction*, OpcodeID, int operand) {
            if (isValidRegisterForLiveness(codeBlock, operand))
                use(VirtualRegister(operand).toLocal());
        });

    // If we have an exception handler, we want the live-in variables of the 
    // exception handler block to be included in the live-in of this particular bytecode.
    if (HandlerInfo* handler = codeBlock->handlerForBytecodeOffset(bytecodeOffset)) {
        // FIXME: This resume check should not be needed.
        // https://bugs.webkit.org/show_bug.cgi?id=159281
        Interpreter* interpreter = codeBlock->vm()->interpreter;
        Instruction* instructionsBegin = codeBlock->instructions().begin();
        Instruction* instruction = &instructionsBegin[bytecodeOffset];
        OpcodeID opcodeID = interpreter->getOpcodeID(instruction->u.opcode);
        if (opcodeID != op_resume) {
            BytecodeBasicBlock* handlerBlock = findBasicBlockWithLeaderOffset(basicBlocks, handler->target);
            ASSERT(handlerBlock);
            handlerBlock->in().forEachSetBit(use);
        }
    }
}
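
The comment at the top of stepOverInstruction carries the key reasoning: because the walk is backward, defs must be cleared from the live set before uses are added. Below is a minimal standalone sketch of that transfer rule for the "op_add loc1, loc1, loc2" case, reusing the FastBitVector set/clear/get interface seen in these examples; the helper is hypothetical, not part of the original file.

// Hypothetical sketch: backward liveness transfer for "op_add dst, src1, src2".
// Clearing the def before setting the uses is what keeps a register that is
// both used and defined by the same instruction (loc1 above) live across it.
static void sketchStepOverAdd(FastBitVector& live, unsigned dstLocal, unsigned srcLocal1, unsigned srcLocal2)
{
    live.clear(dstLocal); // def first, because we execute in reverse
    live.set(srcLocal1);  // then the uses
    live.set(srcLocal2);
}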
Example No. 8
void OSREntryData::dumpInContext(PrintStream& out, DumpContext* context) const
{
    out.print("bc#", m_bytecodeIndex, ", machine code offset = ", m_machineCodeOffset);
    out.print(", stack rules = [");
    
    auto printOperand = [&] (VirtualRegister reg) {
        out.print(inContext(m_expectedValues.operand(reg), context), " (");
        VirtualRegister toReg;
        bool overwritten = false;
        for (OSREntryReshuffling reshuffling : m_reshufflings) {
            if (reg == VirtualRegister(reshuffling.fromOffset)) {
                toReg = VirtualRegister(reshuffling.toOffset);
                break;
            }
            if (reg == VirtualRegister(reshuffling.toOffset))
                overwritten = true;
        }
        if (!overwritten && !toReg.isValid())
            toReg = reg;
        if (toReg.isValid()) {
            if (toReg.isLocal() && !m_machineStackUsed.get(toReg.toLocal()))
                out.print("ignored");
            else
                out.print("maps to ", toReg);
        } else
            out.print("overwritten");
        if (reg.isLocal() && m_localsForcedDouble.get(reg.toLocal()))
            out.print(", forced double");
        if (reg.isLocal() && m_localsForcedAnyInt.get(reg.toLocal()))
            out.print(", forced machine int");
        out.print(")");
    };
    
    CommaPrinter comma;
    for (size_t argumentIndex = m_expectedValues.numberOfArguments(); argumentIndex--;) {
        out.print(comma, "arg", argumentIndex, ":");
        printOperand(virtualRegisterForArgument(argumentIndex));
    }
    for (size_t localIndex = 0; localIndex < m_expectedValues.numberOfLocals(); ++localIndex) {
        out.print(comma, "loc", localIndex, ":");
        printOperand(virtualRegisterForLocal(localIndex));
    }
    
    out.print("], machine stack used = ", m_machineStackUsed);
}
Example No. 9
bool argumentsInvolveStackSlot(InlineCallFrame* inlineCallFrame, VirtualRegister reg)
{
    if (!inlineCallFrame)
        return (reg.isArgument() && reg.toArgument()) || reg.isHeader();
    
    if (inlineCallFrame->isClosureCall
        && reg == VirtualRegister(inlineCallFrame->stackOffset + CallFrameSlot::callee))
        return true;
    
    if (inlineCallFrame->isVarargs()
        && reg == VirtualRegister(inlineCallFrame->stackOffset + CallFrameSlot::argumentCount))
        return true;
    
    unsigned numArguments = inlineCallFrame->arguments.size() - 1;
    VirtualRegister argumentStart =
        VirtualRegister(inlineCallFrame->stackOffset) + CallFrame::argumentOffset(0);
    return reg >= argumentStart && reg < argumentStart + numArguments;
}
Example No. 10
void JIT_OPERATION operationLoadVarargs(ExecState* exec, int32_t firstElementDest, EncodedJSValue encodedArguments, int32_t offset, int32_t length, int32_t mandatoryMinimum)
{
    VM& vm = exec->vm();
    NativeCallFrameTracer tracer(&vm, exec);
    JSValue arguments = JSValue::decode(encodedArguments);
    
    loadVarargs(exec, VirtualRegister(firstElementDest), arguments, offset, length);
    
    for (int32_t i = length; i < mandatoryMinimum; ++i)
        exec->r(firstElementDest + i) = jsUndefined();
}
Example No. 11
bool BytecodeLivenessAnalysis::operandIsLiveAtBytecodeOffset(int operand, unsigned bytecodeOffset)
{
    int numCapturedVars = numberOfCapturedVariables(m_codeBlock);
    if (VirtualRegister(operand).isArgument())
        return true;
    if (operand <= captureStart(m_codeBlock) && operand > captureEnd(m_codeBlock))
        return true;
    FastBitVector result;
    getLivenessInfoForNonCapturedVarsAtBytecodeOffset(bytecodeOffset, result);
    return result.get(operand - numCapturedVars);
}
Example No. 12
VirtualRegister assign(const Vector<unsigned>& allocation, VirtualRegister src)
{
    VirtualRegister result = src;
    if (result.isLocal()) {
        unsigned myAllocation = allocation[result.toLocal()];
        if (myAllocation == UINT_MAX)
            result = VirtualRegister();
        else
            result = virtualRegisterForLocal(myAllocation);
    }
    return result;
}
Example No. 13
void JITCode::reconstruct(
    ExecState* exec, CodeBlock* codeBlock, CodeOrigin codeOrigin, unsigned streamIndex,
    Operands<JSValue>& result)
{
    Operands<ValueRecovery> recoveries;
    reconstruct(codeBlock, codeOrigin, streamIndex, recoveries);
    
    result = Operands<JSValue>(OperandsLike, recoveries);
    for (size_t i = result.size(); i--;) {
        int operand = result.operandForIndex(i);
        
        if (operandIsArgument(operand)
            && !VirtualRegister(operand).toArgument()
            && codeBlock->codeType() == FunctionCode
            && codeBlock->specializationKind() == CodeForConstruct) {
            // Ugh. If we're in a constructor, the 'this' argument may hold garbage. It will
            // also never be used. It doesn't matter what we put into the value for this,
            // but it has to be an actual value that can be grokked by subsequent DFG passes,
            // so we sanitize it here by turning it into Undefined.
            result[i] = jsUndefined();
            continue;
        }
        
        ValueRecovery recovery = recoveries[i];
        JSValue value;
        switch (recovery.technique()) {
        case AlreadyInJSStack:
        case AlreadyInJSStackAsUnboxedCell:
        case AlreadyInJSStackAsUnboxedBoolean:
            value = exec->r(operand).jsValue();
            break;
        case AlreadyInJSStackAsUnboxedInt32:
            value = jsNumber(exec->r(operand).unboxedInt32());
            break;
        case AlreadyInJSStackAsUnboxedInt52:
            value = jsNumber(exec->r(operand).unboxedInt52());
            break;
        case AlreadyInJSStackAsUnboxedDouble:
            value = jsDoubleNumber(exec->r(operand).unboxedDouble());
            break;
        case Constant:
            value = recovery.constant();
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
        result[i] = value;
    }
}
Example No. 14
void emitSetupVarargsFrameFastCase(CCallHelpers& jit, GPRReg numUsedSlotsGPR, GPRReg scratchGPR1, GPRReg scratchGPR2, GPRReg scratchGPR3, InlineCallFrame* inlineCallFrame, unsigned firstVarArgOffset, CCallHelpers::JumpList& slowCase)
{
    ValueRecovery argumentCountRecovery;
    VirtualRegister firstArgumentReg;
    if (inlineCallFrame) {
        if (inlineCallFrame->isVarargs()) {
            argumentCountRecovery = ValueRecovery::displacedInJSStack(
                inlineCallFrame->argumentCountRegister, DataFormatInt32);
        } else {
            argumentCountRecovery = ValueRecovery::constant(
                jsNumber(inlineCallFrame->arguments.size()));
        }
        if (inlineCallFrame->arguments.size() > 1)
            firstArgumentReg = inlineCallFrame->arguments[1].virtualRegister();
        else
            firstArgumentReg = VirtualRegister(0);
    } else {
        argumentCountRecovery = ValueRecovery::displacedInJSStack(
            VirtualRegister(JSStack::ArgumentCount), DataFormatInt32);
        firstArgumentReg = VirtualRegister(CallFrame::argumentOffset(0));
    }
    emitSetupVarargsFrameFastCase(jit, numUsedSlotsGPR, scratchGPR1, scratchGPR2, scratchGPR3, argumentCountRecovery, firstArgumentReg, firstVarArgOffset, slowCase);
}
Example No. 15
void InlineCallFrame::dumpInContext(PrintStream& out, DumpContext* context) const
{
    out.print(briefFunctionInformation(), ":<", RawPointer(executable.get()));
    if (executable->isStrictMode())
        out.print(" (StrictMode)");
    out.print(", bc#", caller.bytecodeIndex, ", ", specializationKind());
    if (isClosureCall)
        out.print(", closure call");
    else
        out.print(", known callee: ", inContext(calleeRecovery.constant(), context));
    out.print(", numArgs+this = ", arguments.size());
    out.print(", stack < loc", VirtualRegister(stackOffset).toLocal());
    out.print(">");
}
Example No. 16
ValueRecovery ValueRep::recoveryForJSValue() const
{
    switch (kind()) {
    case Register:
        return ValueRecovery::inGPR(gpr(), DataFormatJS);
    case Stack:
        RELEASE_ASSERT(!(offsetFromFP() % sizeof(EncodedJSValue)));
        return ValueRecovery::displacedInJSStack(
            VirtualRegister(offsetFromFP() / sizeof(EncodedJSValue)),
            DataFormatJS);
    case Constant:
        return ValueRecovery::constant(JSValue::decode(value()));
    default:
        RELEASE_ASSERT_NOT_REACHED();
        return { };
    }
}
Example No. 17
    bool run()
    {
        RELEASE_ASSERT(m_graph.m_form == ThreadedCPS);
        
        m_graph.clearReplacements();
        m_graph.m_dominators.computeIfNecessary(m_graph);
        
        if (verbose) {
            dataLog("Graph before SSA transformation:\n");
            m_graph.dump();
        }

        // Create a SSACalculator::Variable for every root VariableAccessData.
        for (VariableAccessData& variable : m_graph.m_variableAccessData) {
            if (!variable.isRoot() || variable.isCaptured())
                continue;
            
            SSACalculator::Variable* ssaVariable = m_calculator.newVariable();
            ASSERT(ssaVariable->index() == m_variableForSSAIndex.size());
            m_variableForSSAIndex.append(&variable);
            m_ssaVariableForVariable.add(&variable, ssaVariable);
        }
        
        // Find all SetLocals and create Defs for them. We handle SetArgument by creating a
        // GetLocal, and recording the flush format.
        for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
            BasicBlock* block = m_graph.block(blockIndex);
            if (!block)
                continue;
            
            // Must process the block in forward direction because we want to see the last
            // assignment for every local.
            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);
                if (node->op() != SetLocal && node->op() != SetArgument)
                    continue;
                
                VariableAccessData* variable = node->variableAccessData();
                if (variable->isCaptured())
                    continue;
                
                Node* childNode;
                if (node->op() == SetLocal)
                    childNode = node->child1().node();
                else {
                    ASSERT(node->op() == SetArgument);
                    childNode = m_insertionSet.insertNode(
                        nodeIndex, node->variableAccessData()->prediction(),
                        GetStack, node->origin,
                        OpInfo(m_graph.m_stackAccessData.add(variable->local(), variable->flushFormat())));
                    if (!ASSERT_DISABLED)
                        m_argumentGetters.add(childNode);
                    m_argumentMapping.add(node, childNode);
                }
                
                m_calculator.newDef(
                    m_ssaVariableForVariable.get(variable), block, childNode);
            }
            
            m_insertionSet.execute(block);
        }
        
        // Decide where Phis are to be inserted. This creates the Phi's but doesn't insert them
        // yet. We will later know where to insert them because SSACalculator is such a bro.
        m_calculator.computePhis(
            [&] (SSACalculator::Variable* ssaVariable, BasicBlock* block) -> Node* {
                VariableAccessData* variable = m_variableForSSAIndex[ssaVariable->index()];
                
                // Prune by liveness. This doesn't buy us much other than compile times.
                Node* headNode = block->variablesAtHead.operand(variable->local());
                if (!headNode)
                    return nullptr;

                // There is the possibility of "rebirths". The SSA calculator will already prune
                // rebirths for the same VariableAccessData. But it will not be able to prune
                // rebirths that arose from the same local variable number but a different
                // VariableAccessData. We do that pruning here.
                //
                // Here's an example of a rebirth that this would catch:
                //
                //     var x;
                //     if (foo) {
                //         if (bar) {
                //             x = 42;
                //         } else {
                //             x = 43;
                //         }
                //         print(x);
                //         x = 44;
                //     } else {
                //         x = 45;
                //     }
                //     print(x); // Without this check, we'd have a Phi for x = 42|43 here.
                //
                // FIXME: Consider feeding local variable numbers, not VariableAccessData*'s, as
                // the "variables" for SSACalculator. That would allow us to eliminate this
                // special case.
                // https://bugs.webkit.org/show_bug.cgi?id=136641
                if (headNode->variableAccessData() != variable)
                    return nullptr;
                
                Node* phiNode = m_graph.addNode(
                    variable->prediction(), Phi, NodeOrigin());
                FlushFormat format = variable->flushFormat();
                NodeFlags result = resultFor(format);
                phiNode->mergeFlags(result);
                return phiNode;
            });
        
        if (verbose) {
            dataLog("Computed Phis, about to transform the graph.\n");
            dataLog("\n");
            dataLog("Graph:\n");
            m_graph.dump();
            dataLog("\n");
            dataLog("Mappings:\n");
            for (unsigned i = 0; i < m_variableForSSAIndex.size(); ++i)
                dataLog("    ", i, ": ", VariableAccessDataDump(m_graph, m_variableForSSAIndex[i]), "\n");
            dataLog("\n");
            dataLog("SSA calculator: ", m_calculator, "\n");
        }
        
        // Do the bulk of the SSA conversion. For each block, this tracks the operand->Node
        // mapping based on a combination of what the SSACalculator tells us, and us walking over
        // the block in forward order. We use our own data structure, valueForOperand, for
        // determining the local mapping, but we rely on SSACalculator for the non-local mapping.
        //
        // This does three things at once:
        //
        // - Inserts the Phis in all of the places where they need to go. We've already created
        //   them and they are accounted for in the SSACalculator's data structures, but we
        //   haven't inserted them yet, mostly because we want to insert all of a block's Phis in
        //   one go to amortize the cost of node insertion.
        //
        // - Create and insert Upsilons.
        //
        // - Convert all of the preexisting SSA nodes (other than the old CPS Phi nodes) into SSA
        //   form by replacing as follows:
        //
        //   - MovHint has KillLocal prepended to it.
        //
        //   - GetLocal over captured variables lose their phis and become GetStack.
        //
        //   - GetLocal over uncaptured variables die and get replaced with references to the node
        //     specified by valueForOperand.
        //
        //   - SetLocal turns into PutStack if it's flushed, or turns into a Check otherwise.
        //
        //   - Flush loses its children and turns into a Phantom.
        //
        //   - PhantomLocal becomes Phantom, and its child is whatever is specified by
        //     valueForOperand.
        //
        //   - SetArgument is removed. Note that GetStack nodes have already been inserted.
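        //
        // For instance (a hypothetical IR fragment, assuming loc1 is uncaptured
        // and never flushed), those rules rewrite:
        //
        //     a: GetLocal(loc1)      =>  a: Phantom, replacement = valueForOperand[loc1]
        //     b: SetLocal(loc1, @x)  =>  b: Check(@x), and valueForOperand[loc1] = @x
        //     c: Flush(loc1)         =>  c: Phantom with its children dropped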
        Operands<Node*> valueForOperand(OperandsLike, m_graph.block(0)->variablesAtHead);
        for (BasicBlock* block : m_graph.blocksInPreOrder()) {
            valueForOperand.clear();
            
            // CPS will claim that the root block has all arguments live. But we have already done
            // the first step of SSA conversion: argument locals are no longer live at head;
            // instead we have GetStack nodes for extracting the values of arguments. So, we
            // skip the at-head available value calculation for the root block.
            if (block != m_graph.block(0)) {
                for (size_t i = valueForOperand.size(); i--;) {
                    Node* nodeAtHead = block->variablesAtHead[i];
                    if (!nodeAtHead)
                        continue;
                    
                    VariableAccessData* variable = nodeAtHead->variableAccessData();
                    if (variable->isCaptured())
                        continue;
                    
                    if (verbose)
                        dataLog("Considering live variable ", VariableAccessDataDump(m_graph, variable), " at head of block ", *block, "\n");
                    
                    SSACalculator::Variable* ssaVariable = m_ssaVariableForVariable.get(variable);
                    SSACalculator::Def* def = m_calculator.reachingDefAtHead(block, ssaVariable);
                    if (!def) {
                        // If we are required to insert a Phi, then we won't have a reaching def
                        // at head.
                        continue;
                    }
                    
                    Node* node = def->value();
                    if (node->replacement) {
                        // This will occur when a SetLocal had a GetLocal as its source. The
                        // GetLocal would get replaced with an actual SSA value by the time we get
                        // here. Note that the SSA value with which the GetLocal got replaced
                        // would not in turn have a replacement.
                        node = node->replacement;
                        ASSERT(!node->replacement);
                    }
                    if (verbose)
                        dataLog("Mapping: ", VirtualRegister(valueForOperand.operandForIndex(i)), " -> ", node, "\n");
                    valueForOperand[i] = node;
                }
            }
            
            // Insert Phis by asking the calculator what phis there are in this block. Also update
            // valueForOperand with those Phis. For Phis associated with variables that are not
            // flushed, we also insert a MovHint.
            size_t phiInsertionPoint = 0;
            for (SSACalculator::Def* phiDef : m_calculator.phisForBlock(block)) {
                VariableAccessData* variable = m_variableForSSAIndex[phiDef->variable()->index()];
                
                m_insertionSet.insert(phiInsertionPoint, phiDef->value());
                valueForOperand.operand(variable->local()) = phiDef->value();
                
                m_insertionSet.insertNode(
                    phiInsertionPoint, SpecNone, MovHint, NodeOrigin(),
                    OpInfo(variable->local().offset()), phiDef->value()->defaultEdge());
            }
            
            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);
                
                if (verbose) {
                    dataLog("Processing node ", node, ":\n");
                    m_graph.dump(WTF::dataFile(), "    ", node);
                }
                
                m_graph.performSubstitution(node);
                
                switch (node->op()) {
                case MovHint: {
                    m_insertionSet.insertNode(
                        nodeIndex, SpecNone, KillStack, node->origin,
                        OpInfo(node->unlinkedLocal().offset()));
                    break;
                }
                    
                case SetLocal: {
                    VariableAccessData* variable = node->variableAccessData();
                    
                    if (variable->isCaptured() || !!(node->flags() & NodeIsFlushed)) {
                        node->convertToPutStack(
                            m_graph.m_stackAccessData.add(
                                variable->local(), variable->flushFormat()));
                    } else
                        node->setOpAndDefaultFlags(Check);
                    
                    if (!variable->isCaptured()) {
                        if (verbose)
                            dataLog("Mapping: ", variable->local(), " -> ", node->child1().node(), "\n");
                        valueForOperand.operand(variable->local()) = node->child1().node();
                    }
                    break;
                }
                    
                case GetStack: {
                    ASSERT(m_argumentGetters.contains(node));
                    valueForOperand.operand(node->stackAccessData()->local) = node;
                    break;
                }
                    
                case GetLocal: {
                    VariableAccessData* variable = node->variableAccessData();
                    node->children.reset();
                    
                    if (variable->isCaptured()) {
                        node->convertToGetStack(m_graph.m_stackAccessData.add(variable->local(), variable->flushFormat()));
                        break;
                    }
                    
                    node->convertToPhantom();
                    if (verbose)
                        dataLog("Replacing node ", node, " with ", valueForOperand.operand(variable->local()), "\n");
                    node->replacement = valueForOperand.operand(variable->local());
                    break;
                }
                    
                case Flush: {
                    node->children.reset();
                    node->convertToPhantom();
                    break;
                }
                    
                case PhantomLocal: {
                    ASSERT(node->child1().useKind() == UntypedUse);
                    VariableAccessData* variable = node->variableAccessData();
                    if (variable->isCaptured()) {
                        // This is a fun case. We could have a captured variable that had some
                        // or all of its uses strength reduced to phantoms rather than flushes.
                        // SSA conversion will currently still treat it as flushed, in the sense
                        // that it will just keep the SetLocal. Therefore, there is nothing that
                        // needs to be done here: we don't need to also keep the source value
                        // alive. And even if we did want to keep the source value alive, we
                        // wouldn't be able to, because the variablesAtHead value for a captured
                        // local wouldn't have been computed by the Phi reduction algorithm
                        // above.
                        node->children.reset();
                    } else
                        node->child1() = valueForOperand.operand(variable->local())->defaultEdge();
                    node->convertToPhantom();
                    break;
                }
                    
                case SetArgument: {
                    node->convertToPhantom();
                    break;
                }
                    
                default:
                    break;
                }
            }
            
            // We want to insert Upsilons just before the end of the block. On the surface this
            // seems dangerous because the Upsilon will have a checking UseKind. But, we will not
            // actually be performing the check at the point of the Upsilon; the check will
            // already have been performed at the point where the original SetLocal was.
            size_t upsilonInsertionPoint = block->size() - 1;
            NodeOrigin upsilonOrigin = block->last()->origin;
            for (unsigned successorIndex = block->numSuccessors(); successorIndex--;) {
                BasicBlock* successorBlock = block->successor(successorIndex);
                for (SSACalculator::Def* phiDef : m_calculator.phisForBlock(successorBlock)) {
                    Node* phiNode = phiDef->value();
                    SSACalculator::Variable* ssaVariable = phiDef->variable();
                    VariableAccessData* variable = m_variableForSSAIndex[ssaVariable->index()];
                    FlushFormat format = variable->flushFormat();
                    UseKind useKind = useKindFor(format);
                    
                    m_insertionSet.insertNode(
                        upsilonInsertionPoint, SpecNone, Upsilon, upsilonOrigin,
                        OpInfo(phiNode), Edge(
                            valueForOperand.operand(variable->local()),
                            useKind));
                }
            }
            
            m_insertionSet.execute(block);
        }
        
        // Free all CPS phis and reset variables vectors.
        for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
            BasicBlock* block = m_graph.block(blockIndex);
            if (!block)
                continue;
            for (unsigned phiIndex = block->phis.size(); phiIndex--;)
                m_graph.m_allocator.free(block->phis[phiIndex]);
            block->phis.clear();
            block->variablesAtHead.clear();
            block->variablesAtTail.clear();
            block->valuesAtHead.clear();
            block->valuesAtTail.clear();
            block->ssa = std::make_unique<BasicBlock::SSAData>(block);
        }
        
        m_graph.m_argumentFormats.resize(m_graph.m_arguments.size());
        for (unsigned i = m_graph.m_arguments.size(); i--;) {
            FlushFormat format = FlushedJSValue;

            Node* node = m_argumentMapping.get(m_graph.m_arguments[i]);

            // m_argumentMapping.get could return null for a captured local. That's fine. We only
            // track the argument loads of those arguments for which we speculate type. We don't
            // speculate type for captured arguments.
            if (node)
                format = node->stackAccessData()->format;
            
            m_graph.m_argumentFormats[i] = format;
            m_graph.m_arguments[i] = node; // Record the load that loads the arguments for the benefit of exit profiling.
        }
        
        m_graph.m_form = SSA;

        if (verbose) {
            dataLog("Graph after SSA transformation:\n");
            m_graph.dump();
        }

        return true;
    }
Example No. 18
void JIT::compileLoadVarargs(Instruction* instruction)
{
    int thisValue = instruction[3].u.operand;
    int arguments = instruction[4].u.operand;
    int firstFreeRegister = instruction[5].u.operand;

    JumpList slowCase;
    JumpList end;
    bool canOptimize = m_codeBlock->usesArguments()
        && VirtualRegister(arguments) == m_codeBlock->argumentsRegister()
        && !m_codeBlock->symbolTable()->slowArguments();

    if (canOptimize) {
        emitLoadTag(arguments, regT1);
        slowCase.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::EmptyValueTag)));

        load32(payloadFor(JSStack::ArgumentCount), regT2);
        slowCase.append(branch32(Above, regT2, TrustedImm32(Arguments::MaxArguments + 1)));
        // regT2: argumentCountIncludingThis

        move(regT2, regT3);
        neg32(regT3);
        add32(TrustedImm32(firstFreeRegister - JSStack::CallFrameHeaderSize), regT3);
        lshift32(TrustedImm32(3), regT3);
        addPtr(callFrameRegister, regT3);
        // regT3: newCallFrame

        slowCase.append(branchPtr(Above, AbsoluteAddress(m_vm->interpreter->stack().addressOfEnd()), regT3));

        // Initialize ArgumentCount.
        store32(regT2, payloadFor(JSStack::ArgumentCount, regT3));

        // Initialize 'this'.
        emitLoad(thisValue, regT1, regT0);
        store32(regT0, Address(regT3, OBJECT_OFFSETOF(JSValue, u.asBits.payload) + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
        store32(regT1, Address(regT3, OBJECT_OFFSETOF(JSValue, u.asBits.tag) + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));

        // Copy arguments.
        end.append(branchSub32(Zero, TrustedImm32(1), regT2));
        // regT2: argumentCount;

        Label copyLoop = label();
        load32(BaseIndex(callFrameRegister, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload) +(CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))), regT0);
        load32(BaseIndex(callFrameRegister, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag) +(CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))), regT1);
        store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload) +(CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
        store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag) +(CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
        branchSub32(NonZero, TrustedImm32(1), regT2).linkTo(copyLoop, this);

        end.append(jump());
    }

    if (canOptimize)
        slowCase.link(this);

    emitLoad(thisValue, regT1, regT0);
    emitLoad(arguments, regT3, regT2);
    callOperation(operationLoadVarargs, regT1, regT0, regT3, regT2, firstFreeRegister);
    move(returnValueRegister, regT3);

    if (canOptimize)
        end.link(this);
}
Example No. 19
SUPPRESS_ASAN
void* prepareOSREntry(ExecState* exec, CodeBlock* codeBlock, unsigned bytecodeIndex)
{
    ASSERT(JITCode::isOptimizingJIT(codeBlock->jitType()));
    ASSERT(codeBlock->alternative());
    ASSERT(codeBlock->alternative()->jitType() == JITCode::BaselineJIT);
    ASSERT(!codeBlock->jitCodeMap());

    if (!Options::useOSREntryToDFG())
        return 0;

    if (Options::verboseOSR()) {
        dataLog(
            "DFG OSR in ", *codeBlock->alternative(), " -> ", *codeBlock,
            " from bc#", bytecodeIndex, "\n");
    }
    
    VM* vm = &exec->vm();

    sanitizeStackForVM(vm);
    
    if (bytecodeIndex)
        codeBlock->ownerScriptExecutable()->setDidTryToEnterInLoop(true);
    
    if (codeBlock->jitType() != JITCode::DFGJIT) {
        RELEASE_ASSERT(codeBlock->jitType() == JITCode::FTLJIT);
        
        // When will this happen? We could have:
        //
        // - An exit from the FTL JIT into the baseline JIT followed by an attempt
        //   to reenter. We're fine with allowing this to fail. If it happens
        //   enough we'll just reoptimize. It basically means that the OSR exit cost
        //   us dearly and so reoptimizing is the right thing to do.
        //
        // - We have recursive code with hot loops. Consider that foo has a hot loop
        //   that calls itself. We have two foo's on the stack, let's call them foo1
        //   and foo2, with foo1 having called foo2 from foo's hot loop. foo2 gets
        //   optimized all the way into the FTL. Then it returns into foo1, and then
        //   foo1 wants to get optimized. It might reach this conclusion from its
        //   hot loop and attempt to OSR enter. And we'll tell it that it can't. It
        //   might be worth addressing this case, but I just think this case will
        //   be super rare. For now, if it does happen, it'll cause some compilation
        //   thrashing.
        
        if (Options::verboseOSR())
            dataLog("    OSR failed because the target code block is not DFG.\n");
        return 0;
    }
    
    JITCode* jitCode = codeBlock->jitCode()->dfg();
    OSREntryData* entry = jitCode->osrEntryDataForBytecodeIndex(bytecodeIndex);
    
    if (!entry) {
        if (Options::verboseOSR())
            dataLogF("    OSR failed because the entrypoint was optimized out.\n");
        return 0;
    }
    
    ASSERT(entry->m_bytecodeIndex == bytecodeIndex);
    
    // The code below checks if it is safe to perform OSR entry. It may find
    // that it is unsafe to do so, for any number of reasons, which are documented
    // below. If the code decides not to OSR then it returns 0, and it's the caller's
    // responsibility to patch up the state in such a way as to ensure that it's
    // both safe and efficient to continue executing baseline code for now. This
    // should almost certainly include calling either codeBlock->optimizeAfterWarmUp()
    // or codeBlock->dontOptimizeAnytimeSoon().
    
    // 1) Verify predictions. If the predictions are inconsistent with the actual
    //    values, then OSR entry is not possible at this time. It's tempting to
    //    assume that we could somehow avoid this case. We can certainly avoid it
    //    for first-time loop OSR - that is, OSR into a CodeBlock that we have just
    //    compiled. Then we are almost guaranteed that all of the predictions will
    //    check out. It would be pretty easy to make that a hard guarantee. But
    //    then there would still be the case where two call frames with the same
    //    baseline CodeBlock are on the stack at the same time. The top one
    //    triggers compilation and OSR. In that case, we may no longer have
    //    accurate value profiles for the one deeper in the stack. Hence, when we
    //    pop into the CodeBlock that is deeper on the stack, we might OSR and
    //    realize that the predictions are wrong. Probably, in most cases, this is
    //    just an anomaly in the sense that the older CodeBlock simply went off
    //    into a less-likely path. So, the wisest course of action is to simply not
    //    OSR at this time.
    
    for (size_t argument = 0; argument < entry->m_expectedValues.numberOfArguments(); ++argument) {
        if (argument >= exec->argumentCountIncludingThis()) {
            if (Options::verboseOSR()) {
                dataLogF("    OSR failed because argument %zu was not passed, expected ", argument);
                entry->m_expectedValues.argument(argument).dump(WTF::dataFile());
                dataLogF(".\n");
            }
            return 0;
        }
        
        JSValue value;
        if (!argument)
            value = exec->thisValue();
        else
            value = exec->argument(argument - 1);
        
        if (!entry->m_expectedValues.argument(argument).validate(value)) {
            if (Options::verboseOSR()) {
                dataLog(
                    "    OSR failed because argument ", argument, " is ", value,
                    ", expected ", entry->m_expectedValues.argument(argument), ".\n");
            }
            return 0;
        }
    }
    
    for (size_t local = 0; local < entry->m_expectedValues.numberOfLocals(); ++local) {
        int localOffset = virtualRegisterForLocal(local).offset();
        if (entry->m_localsForcedDouble.get(local)) {
            if (!exec->registers()[localOffset].asanUnsafeJSValue().isNumber()) {
                if (Options::verboseOSR()) {
                    dataLog(
                        "    OSR failed because variable ", localOffset, " is ",
                        exec->registers()[localOffset].asanUnsafeJSValue(), ", expected number.\n");
                }
                return 0;
            }
            continue;
        }
        if (entry->m_localsForcedAnyInt.get(local)) {
            if (!exec->registers()[localOffset].asanUnsafeJSValue().isAnyInt()) {
                if (Options::verboseOSR()) {
                    dataLog(
                        "    OSR failed because variable ", localOffset, " is ",
                        exec->registers()[localOffset].asanUnsafeJSValue(), ", expected ",
                        "machine int.\n");
                }
                return 0;
            }
            continue;
        }
        if (!entry->m_expectedValues.local(local).validate(exec->registers()[localOffset].asanUnsafeJSValue())) {
            if (Options::verboseOSR()) {
                dataLog(
                    "    OSR failed because variable ", localOffset, " is ",
                    exec->registers()[localOffset].asanUnsafeJSValue(), ", expected ",
                    entry->m_expectedValues.local(local), ".\n");
            }
            return 0;
        }
    }

    // 2) Check the stack height. The DFG JIT may require a taller stack than the
    //    baseline JIT, in some cases. If we can't grow the stack, then don't do
    //    OSR right now. That's the only option we have unless we want basic block
    //    boundaries to start throwing RangeErrors. Although that would be possible,
    //    it seems silly: you'd be diverting the program to error handling when it
    //    would have otherwise just kept running albeit less quickly.
    
    unsigned frameSizeForCheck = jitCode->common.requiredRegisterCountForExecutionAndExit();
    if (!vm->interpreter->stack().ensureCapacityFor(&exec->registers()[virtualRegisterForLocal(frameSizeForCheck - 1).offset()])) {
        if (Options::verboseOSR())
            dataLogF("    OSR failed because stack growth failed.\n");
        return 0;
    }
    
    if (Options::verboseOSR())
        dataLogF("    OSR should succeed.\n");

    // At this point we're committed to entering. We will do some work to set things up,
    // but we also rely on our caller recognizing that when we return a non-null pointer,
    // that means that we're already past the point of no return and we must succeed at
    // entering.
    
    // 3) Set up the data in the scratch buffer and perform data format conversions.

    unsigned frameSize = jitCode->common.frameRegisterCount;
    unsigned baselineFrameSize = entry->m_expectedValues.numberOfLocals();
    unsigned maxFrameSize = std::max(frameSize, baselineFrameSize);

    Register* scratch = bitwise_cast<Register*>(vm->scratchBufferForSize(sizeof(Register) * (2 + JSStack::CallFrameHeaderSize + maxFrameSize))->dataBuffer());
    
    *bitwise_cast<size_t*>(scratch + 0) = frameSize;
    
    void* targetPC = codeBlock->jitCode()->executableAddressAtOffset(entry->m_machineCodeOffset);
    if (Options::verboseOSR())
        dataLogF("    OSR using target PC %p.\n", targetPC);
    RELEASE_ASSERT(targetPC);
    *bitwise_cast<void**>(scratch + 1) = targetPC;
    
    Register* pivot = scratch + 2 + JSStack::CallFrameHeaderSize;
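    // Layout of the scratch buffer built above: scratch[0] holds frameSize,
    // scratch[1] holds targetPC, and the Register slots from scratch + 2 onward
    // mirror the call frame. With reg = VirtualRegister(-1 - index) as in the
    // loop below, pivot[index] addresses header slots at negative indices and
    // loc0, loc1, ... at indices 0, 1, ...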
    
    for (int index = -JSStack::CallFrameHeaderSize; index < static_cast<int>(baselineFrameSize); ++index) {
        VirtualRegister reg(-1 - index);
        
        if (reg.isLocal()) {
            if (entry->m_localsForcedDouble.get(reg.toLocal())) {
                *bitwise_cast<double*>(pivot + index) = exec->registers()[reg.offset()].asanUnsafeJSValue().asNumber();
                continue;
            }
            
            if (entry->m_localsForcedAnyInt.get(reg.toLocal())) {
                *bitwise_cast<int64_t*>(pivot + index) = exec->registers()[reg.offset()].asanUnsafeJSValue().asAnyInt() << JSValue::int52ShiftAmount;
                continue;
            }
        }
        
        pivot[index] = exec->registers()[reg.offset()].asanUnsafeJSValue();
    }
    
    // 4) Reshuffle those registers that need reshuffling.
    Vector<JSValue> temporaryLocals(entry->m_reshufflings.size());
    for (unsigned i = entry->m_reshufflings.size(); i--;)
        temporaryLocals[i] = pivot[VirtualRegister(entry->m_reshufflings[i].fromOffset).toLocal()].asanUnsafeJSValue();
    for (unsigned i = entry->m_reshufflings.size(); i--;)
        pivot[VirtualRegister(entry->m_reshufflings[i].toOffset).toLocal()] = temporaryLocals[i];
    
    // 5) Clear those parts of the call frame that the DFG ain't using. This helps GC on
    //    some programs by eliminating some stale pointer pathologies.
    for (unsigned i = frameSize; i--;) {
        if (entry->m_machineStackUsed.get(i))
            continue;
        pivot[i] = JSValue();
    }

    // 6) Copy our callee saves to buffer.
#if NUMBER_OF_CALLEE_SAVES_REGISTERS > 0
    RegisterAtOffsetList* registerSaveLocations = codeBlock->calleeSaveRegisters();
    RegisterAtOffsetList* allCalleeSaves = vm->getAllCalleeSaveRegisterOffsets();
    RegisterSet dontSaveRegisters = RegisterSet(RegisterSet::stackRegisters(), RegisterSet::allFPRs());

    unsigned registerCount = registerSaveLocations->size();
    VMEntryRecord* record = vmEntryRecord(vm->topVMEntryFrame);
    for (unsigned i = 0; i < registerCount; i++) {
        RegisterAtOffset currentEntry = registerSaveLocations->at(i);
        if (dontSaveRegisters.get(currentEntry.reg()))
            continue;
        RegisterAtOffset* calleeSavesEntry = allCalleeSaves->find(currentEntry.reg());
        
        *(bitwise_cast<intptr_t*>(pivot - 1) - currentEntry.offsetAsIndex()) = record->calleeSaveRegistersBuffer[calleeSavesEntry->offsetAsIndex()];
    }
#endif
    
    // 7) Fix the call frame to have the right code block.
    
    *bitwise_cast<CodeBlock**>(pivot - 1 - JSStack::CodeBlock) = codeBlock;
    
    if (Options::verboseOSR())
        dataLogF("    OSR returning data buffer %p.\n", scratch);
    return scratch;
}
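
The comments in prepareOSREntry spell out the caller contract: a null return means "do not enter, and keep baseline execution viable", while a non-null return commits to entry. Below is a minimal sketch of a caller honoring that contract, assuming (as the comment before step 1 suggests) that the patch-up is one of optimizeAfterWarmUp() or dontOptimizeAnytimeSoon(); the wrapper name and the choice of patch-up are hypothetical.

// Hypothetical wrapper: back off per the documented contract when entry fails.
static void* sketchTryOSREntry(ExecState* exec, CodeBlock* optimizedCodeBlock, unsigned bytecodeIndex)
{
    if (void* buffer = prepareOSREntry(exec, optimizedCodeBlock, bytecodeIndex))
        return buffer; // committed: the caller must now complete the entry
    // Not entering this time; schedule a retry instead of spinning on the check.
    optimizedCodeBlock->alternative()->optimizeAfterWarmUp(); // assumed patch-up
    return nullptr;
}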
Example No. 20
static void computeUsesForBytecodeOffset(CodeBlock* codeBlock, unsigned bytecodeOffset, FastBitVector& uses)
{
    Interpreter* interpreter = codeBlock->vm()->interpreter;
    Instruction* instructionsBegin = codeBlock->instructions().begin();
    Instruction* instruction = &instructionsBegin[bytecodeOffset];
    OpcodeID opcodeID = interpreter->getOpcodeID(instruction->u.opcode);
    switch (opcodeID) {
    // No uses.
    case op_new_regexp:
    case op_new_array_buffer:
    case op_throw_static_error:
    case op_debug:
    case op_resolve_scope:
    case op_pop_scope:
    case op_jneq_ptr:
    case op_new_func_exp:
    case op_loop_hint:
    case op_jmp:
    case op_new_object:
    case op_init_lazy_reg:
    case op_get_callee:
    case op_enter:
    case op_catch:
        return;
    // First argument.
    case op_new_func:
    case op_create_activation: 
    case op_create_arguments:
    case op_to_this:
    case op_tear_off_activation:
    case op_profile_will_call:
    case op_profile_did_call:
    case op_throw:
    case op_push_with_scope:
    case op_end:
    case op_ret:
    case op_jtrue:
    case op_jfalse:
    case op_jeq_null:
    case op_jneq_null:
    case op_dec:
    case op_inc: {
        if (isValidRegisterForLiveness(codeBlock, instruction[1].u.operand))
            setForOperand(codeBlock, uses, instruction[1].u.operand);
        return;
    }
    // First and second arguments.
    case op_del_by_id:
    case op_ret_object_or_this:
    case op_jlesseq:
    case op_jgreater:
    case op_jgreatereq:
    case op_jnless:
    case op_jnlesseq:
    case op_jngreater:
    case op_jngreatereq:
    case op_jless: {
        if (isValidRegisterForLiveness(codeBlock, instruction[1].u.operand))
            setForOperand(codeBlock, uses, instruction[1].u.operand);
        if (isValidRegisterForLiveness(codeBlock, instruction[2].u.operand))
            setForOperand(codeBlock, uses, instruction[2].u.operand);
        return;
    }
    // First, second, and third arguments.
    case op_del_by_val:
    case op_put_by_val_direct:
    case op_put_by_val: {
        if (isValidRegisterForLiveness(codeBlock, instruction[1].u.operand))
            setForOperand(codeBlock, uses, instruction[1].u.operand);
        if (isValidRegisterForLiveness(codeBlock, instruction[2].u.operand))
            setForOperand(codeBlock, uses, instruction[2].u.operand);
        if (isValidRegisterForLiveness(codeBlock, instruction[3].u.operand))
            setForOperand(codeBlock, uses, instruction[3].u.operand);
        return;
    }
    // First and third arguments.
    case op_put_by_index:
    case op_put_by_id_replace:
    case op_put_by_id_transition:
    case op_put_by_id_transition_direct:
    case op_put_by_id_transition_direct_out_of_line:
    case op_put_by_id_transition_normal:
    case op_put_by_id_transition_normal_out_of_line:
    case op_put_by_id_generic:
    case op_put_by_id_out_of_line:
    case op_put_by_id:
    case op_put_to_scope: {
        if (isValidRegisterForLiveness(codeBlock, instruction[1].u.operand))
            setForOperand(codeBlock, uses, instruction[1].u.operand);
        if (isValidRegisterForLiveness(codeBlock, instruction[3].u.operand))
            setForOperand(codeBlock, uses, instruction[3].u.operand);
        return;
    }
    // First, third, and fourth arguments.
    case op_put_getter_setter: {
        if (isValidRegisterForLiveness(codeBlock, instruction[1].u.operand))
            setForOperand(codeBlock, uses, instruction[1].u.operand);
        if (isValidRegisterForLiveness(codeBlock, instruction[3].u.operand))
            setForOperand(codeBlock, uses, instruction[3].u.operand);
        if (isValidRegisterForLiveness(codeBlock, instruction[4].u.operand))
            setForOperand(codeBlock, uses, instruction[4].u.operand);
        return;
    }
    // Second argument.
    case op_init_global_const_nop:
    case op_init_global_const:
    case op_push_name_scope:
    case op_get_from_scope:
    case op_to_primitive:
    case op_get_by_id:
    case op_get_by_id_out_of_line:
    case op_get_by_id_self:
    case op_get_by_id_proto:
    case op_get_by_id_chain:
    case op_get_by_id_getter_self:
    case op_get_by_id_getter_proto:
    case op_get_by_id_getter_chain:
    case op_get_by_id_custom_self:
    case op_get_by_id_custom_proto:
    case op_get_by_id_custom_chain:
    case op_get_by_id_generic:
    case op_get_array_length:
    case op_get_string_length:
    case op_get_arguments_length:
    case op_typeof:
    case op_is_undefined:
    case op_is_boolean:
    case op_is_number:
    case op_is_string:
    case op_is_object:
    case op_is_function:
    case op_to_number:
    case op_negate:
    case op_neq_null:
    case op_eq_null:
    case op_not:
    case op_mov:
    case op_new_array_with_size:
    case op_create_this: {
        if (isValidRegisterForLiveness(codeBlock, instruction[2].u.operand))
            setForOperand(codeBlock, uses, instruction[2].u.operand);
        return;
    }
    // Second and third arguments.
    case op_get_by_val:
    case op_get_argument_by_val:
    case op_in:
    case op_instanceof:
    case op_check_has_instance:
    case op_add:
    case op_mul:
    case op_div:
    case op_mod:
    case op_sub:
    case op_lshift:
    case op_rshift:
    case op_urshift:
    case op_bitand:
    case op_bitxor:
    case op_bitor:
    case op_less:
    case op_lesseq:
    case op_greater:
    case op_greatereq:
    case op_nstricteq:
    case op_stricteq:
    case op_neq:
    case op_eq: {
        if (isValidRegisterForLiveness(codeBlock, instruction[2].u.operand))
            setForOperand(codeBlock, uses, instruction[2].u.operand);
        if (isValidRegisterForLiveness(codeBlock, instruction[3].u.operand))
            setForOperand(codeBlock, uses, instruction[3].u.operand);
        return;
    }
    // Second, third, and fourth arguments.
    case op_call_varargs:
    case op_get_pnames: {
        if (isValidRegisterForLiveness(codeBlock, instruction[2].u.operand))
            setForOperand(codeBlock, uses, instruction[2].u.operand);
        if (isValidRegisterForLiveness(codeBlock, instruction[3].u.operand))
            setForOperand(codeBlock, uses, instruction[3].u.operand);
        if (isValidRegisterForLiveness(codeBlock, instruction[4].u.operand))
            setForOperand(codeBlock, uses, instruction[4].u.operand);
        return;
    }
    // Second, third, fourth, and fifth arguments.
    case op_next_pname: {
        if (isValidRegisterForLiveness(codeBlock, instruction[2].u.operand))
            setForOperand(codeBlock, uses, instruction[2].u.operand);
        if (isValidRegisterForLiveness(codeBlock, instruction[3].u.operand))
            setForOperand(codeBlock, uses, instruction[3].u.operand);
        if (isValidRegisterForLiveness(codeBlock, instruction[4].u.operand))
            setForOperand(codeBlock, uses, instruction[4].u.operand);
        if (isValidRegisterForLiveness(codeBlock, instruction[5].u.operand))
            setForOperand(codeBlock, uses, instruction[5].u.operand);
        return;
    }
    // Second, third, fourth, fifth, and sixth arguments. 
    case op_get_by_pname: {
        if (isValidRegisterForLiveness(codeBlock, instruction[2].u.operand))
            setForOperand(codeBlock, uses, instruction[2].u.operand);
        if (isValidRegisterForLiveness(codeBlock, instruction[3].u.operand))
            setForOperand(codeBlock, uses, instruction[3].u.operand);
        if (isValidRegisterForLiveness(codeBlock, instruction[4].u.operand))
            setForOperand(codeBlock, uses, instruction[4].u.operand);
        if (isValidRegisterForLiveness(codeBlock, instruction[5].u.operand))
            setForOperand(codeBlock, uses, instruction[5].u.operand);
        if (isValidRegisterForLiveness(codeBlock, instruction[6].u.operand))
            setForOperand(codeBlock, uses, instruction[6].u.operand);
        return;
    }
    // Third argument.
    case op_switch_string:
    case op_switch_char:
    case op_switch_imm: {
        if (isValidRegisterForLiveness(codeBlock, instruction[3].u.operand))
            setForOperand(codeBlock, uses, instruction[3].u.operand);
        return;
    }
    // Variable number of arguments.
    case op_new_array:
    case op_strcat: {
        int base = instruction[2].u.operand;
        int count = instruction[3].u.operand;
        for (int i = 0; i < count; i++) {
            if (isValidRegisterForLiveness(codeBlock, base + i))
                setForOperand(codeBlock, uses, base + i);
        }
        return;
    }
    // Crazy stuff for calls.
    case op_construct:
    case op_call_eval:
    case op_call: {
        if (isValidRegisterForLiveness(codeBlock, instruction[2].u.operand))
            setForOperand(codeBlock, uses, instruction[2].u.operand);
        int argCount = instruction[3].u.operand;
        int registerOffset = instruction[4].u.operand;
        int lastArg = registerOffset + CallFrame::thisArgumentOffset();
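        // The argCount arguments (including |this|) occupy a contiguous run of
        // registers ending at lastArg, so walk downward and mark each as a use.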
        for (int i = 0; i < argCount; i++) {
            if (isValidRegisterForLiveness(codeBlock, lastArg - i))
                setForOperand(codeBlock, uses, lastArg - i);
        }
        return;
    }
    // Special stuff for tear off arguments.
    case op_tear_off_arguments: {
        if (isValidRegisterForLiveness(codeBlock, instruction[1].u.operand))
            setForOperand(codeBlock, uses, instruction[1].u.operand);
        if (isValidRegisterForLiveness(codeBlock, unmodifiedArgumentsRegister(VirtualRegister(instruction[1].u.operand)).offset()))
            setForOperand(codeBlock, uses, unmodifiedArgumentsRegister(VirtualRegister(instruction[1].u.operand)).offset());
        if (isValidRegisterForLiveness(codeBlock, instruction[2].u.operand))
            setForOperand(codeBlock, uses, instruction[2].u.operand);
        return;
    }
#define LLINT_HELPER_OPCODES(opcode, length) case opcode:
    FOR_EACH_LLINT_OPCODE_EXTENSION(LLINT_HELPER_OPCODES)
        return;
#undef LLINT_HELPER_OPCODES
    }
}
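The switch above is table-driven dispatch: each opcode names the operand slots it reads, and every read of a valid register is recorded as a use. Below is a minimal, self-contained sketch of the same pattern; SketchInstruction, markUse, and the two-opcode enum are invented stand-ins for JSC's instruction stream, isValidRegisterForLiveness, and setForOperand, not the real API.

#include <bitset>
#include <iostream>

enum SketchOpcode { sketch_op_add, sketch_op_mov };

struct SketchInstruction {
    SketchOpcode opcode;
    int operand[4]; // operand[0] unused; slots 1..3 mirror instruction[1..3]
};

static const int kNumLocals = 64;

// Stands in for setForOperand guarded by isValidRegisterForLiveness.
static void markUse(std::bitset<kNumLocals>& uses, int operand)
{
    if (operand >= 0 && operand < kNumLocals)
        uses.set(operand);
}

static void computeUses(const SketchInstruction& insn, std::bitset<kNumLocals>& uses)
{
    switch (insn.opcode) {
    case sketch_op_add: // second and third arguments are the sources
        markUse(uses, insn.operand[2]);
        markUse(uses, insn.operand[3]);
        return;
    case sketch_op_mov: // second argument is the source
        markUse(uses, insn.operand[2]);
        return;
    }
}

int main()
{
    std::bitset<kNumLocals> uses;
    SketchInstruction add = { sketch_op_add, { 0, 5, 1, 2 } }; // dst = 5, srcs = 1, 2
    computeUses(add, uses);
    std::cout << "src 1 used: " << uses.test(1) << ", dst 5 used: " << uses.test(5) << "\n";
    return 0;
}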
void reifyInlinedCallFrames(CCallHelpers& jit, const OSRExitBase& exit)
{
    ASSERT(jit.baselineCodeBlock()->jitType() == JITCode::BaselineJIT);
    jit.storePtr(AssemblyHelpers::TrustedImmPtr(jit.baselineCodeBlock()), AssemblyHelpers::addressFor((VirtualRegister)JSStack::CodeBlock));

    CodeOrigin codeOrigin;
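    // Walk from the innermost inlined frame outward to the machine frame,
    // materializing the header slots each baseline frame expects.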
    for (codeOrigin = exit.m_codeOrigin; codeOrigin.inlineCallFrame; codeOrigin = codeOrigin.inlineCallFrame->caller) {
        InlineCallFrame* inlineCallFrame = codeOrigin.inlineCallFrame;
        CodeBlock* baselineCodeBlock = jit.baselineCodeBlockFor(codeOrigin);
        CodeBlock* baselineCodeBlockForCaller = jit.baselineCodeBlockFor(inlineCallFrame->caller);
        unsigned callBytecodeIndex = inlineCallFrame->caller.bytecodeIndex;
        CallLinkInfo& callLinkInfo = baselineCodeBlockForCaller->getCallLinkInfo(callBytecodeIndex);
        
        void* jumpTarget = callLinkInfo.callReturnLocation.executableAddress();

        GPRReg callerFrameGPR;
        if (inlineCallFrame->caller.inlineCallFrame) {
            jit.addPtr(AssemblyHelpers::TrustedImm32(inlineCallFrame->caller.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister, GPRInfo::regT3);
            callerFrameGPR = GPRInfo::regT3;
        } else
            callerFrameGPR = GPRInfo::callFrameRegister;
        
#if USE(JSVALUE64)
        jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CodeBlock)));
        if (!inlineCallFrame->isClosureCall)
            jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(JSValue(inlineCallFrame->calleeConstant()->scope()))), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ScopeChain)));
        jit.store64(callerFrameGPR, AssemblyHelpers::addressForByteOffset(inlineCallFrame->callerFrameOffset()));
        jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::addressForByteOffset(inlineCallFrame->returnPCOffset()));
        uint32_t locationBits = CallFrame::Location::encodeAsBytecodeOffset(codeOrigin.bytecodeIndex);
        jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
        jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
        if (!inlineCallFrame->isClosureCall)
            jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(JSValue(inlineCallFrame->calleeConstant()))), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
        
        // Leave the captured arguments in regT3.
        if (baselineCodeBlock->usesArguments())
            jit.loadPtr(AssemblyHelpers::addressFor(VirtualRegister(inlineCallFrame->stackOffset + unmodifiedArgumentsRegister(baselineCodeBlock->argumentsRegister()).offset())), GPRInfo::regT3);
#else // USE(JSVALUE64) // so this is the 32-bit part
        jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CodeBlock)));
        jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ScopeChain)));
        if (!inlineCallFrame->isClosureCall)
            jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->calleeConstant()->scope()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ScopeChain)));
        jit.storePtr(callerFrameGPR, AssemblyHelpers::addressForByteOffset(inlineCallFrame->callerFrameOffset()));
        jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::addressForByteOffset(inlineCallFrame->returnPCOffset()));
        Instruction* instruction = baselineCodeBlock->instructions().begin() + codeOrigin.bytecodeIndex;
        uint32_t locationBits = CallFrame::Location::encodeAsBytecodeInstruction(instruction);
        jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
        jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
        jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
        if (!inlineCallFrame->isClosureCall)
            jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->calleeConstant()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));

        // Leave the captured arguments in regT3.
        if (baselineCodeBlock->usesArguments())
            jit.loadPtr(AssemblyHelpers::payloadFor(VirtualRegister(inlineCallFrame->stackOffset + unmodifiedArgumentsRegister(baselineCodeBlock->argumentsRegister()).offset())), GPRInfo::regT3);
#endif // USE(JSVALUE64) // ending the #else part, so directly above is the 32-bit part
        
        if (baselineCodeBlock->usesArguments()) {
            AssemblyHelpers::Jump noArguments = jit.branchTestPtr(AssemblyHelpers::Zero, GPRInfo::regT3);
            jit.addPtr(AssemblyHelpers::TrustedImm32(inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister, GPRInfo::regT0);
            jit.storePtr(GPRInfo::regT0, AssemblyHelpers::Address(GPRInfo::regT3, Arguments::offsetOfRegisters()));
            noArguments.link(&jit);
        }
    }

#if USE(JSVALUE64)
    uint32_t locationBits = CallFrame::Location::encodeAsBytecodeOffset(codeOrigin.bytecodeIndex);
#else
    Instruction* instruction = jit.baselineCodeBlock()->instructions().begin() + codeOrigin.bytecodeIndex;
    uint32_t locationBits = CallFrame::Location::encodeAsBytecodeInstruction(instruction);
#endif
    jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(JSStack::ArgumentCount)));
}
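Every header slot written above is addressed as inlineCallFrame->stackOffset plus a JSStack slot index, which the assembler's addressFor() scales to a byte offset off the frame register. A hedged sketch of just that arithmetic, assuming 8-byte registers; SketchStack and byteOffsetForSlot are illustrative names, not JSC API.

#include <cstdint>
#include <iostream>

namespace SketchStack {
// Hypothetical header-slot indices; JSC's real JSStack enum differs.
const int CodeBlock = 1;
const int ArgumentCount = 3;
}

// addressFor(reg) is callFrameRegister + reg * sizeof(Register); this computes
// the byte offset that a store like jit.storePtr(..., addressFor(reg)) targets.
static std::intptr_t byteOffsetForSlot(int inlineFrameStackOffset, int slot)
{
    const std::intptr_t registerSize = 8; // one EncodedJSValue on a 64-bit target
    return static_cast<std::intptr_t>(inlineFrameStackOffset + slot) * registerSize;
}

int main()
{
    int stackOffset = -20; // hypothetical inlineCallFrame->stackOffset
    std::cout << "CodeBlock slot at byte offset "
              << byteOffsetForSlot(stackOffset, SketchStack::CodeBlock) << "\n";
    std::cout << "ArgumentCount slot at byte offset "
              << byteOffsetForSlot(stackOffset, SketchStack::ArgumentCount) << "\n";
    return 0;
}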
void LocalOSRAvailabilityCalculator::executeNode(Node* node)
{
    switch (node->op()) {
    case PutStack: {
        StackAccessData* data = node->stackAccessData();
        m_availability.m_locals.operand(data->local).setFlush(data->flushedAt());
        break;
    }

    case KillStack: {
        m_availability.m_locals.operand(node->unlinkedLocal()).setFlush(FlushedAt(ConflictingFlush));
        break;
    }

    case GetStack: {
        StackAccessData* data = node->stackAccessData();
        m_availability.m_locals.operand(data->local) = Availability(node, data->flushedAt());
        break;
    }

    case MovHint: {
        m_availability.m_locals.operand(node->unlinkedLocal()).setNode(node->child1().node());
        break;
    }

    case ZombieHint: {
        m_availability.m_locals.operand(node->unlinkedLocal()).setNodeUnavailable();
        break;
    }

    case LoadVarargs:
    case ForwardVarargs: {
        LoadVarargsData* data = node->loadVarargsData();
        m_availability.m_locals.operand(data->count) =
            Availability(FlushedAt(FlushedInt32, data->machineCount));
        for (unsigned i = data->limit; i--;) {
            m_availability.m_locals.operand(VirtualRegister(data->start.offset() + i)) =
                Availability(FlushedAt(FlushedJSValue, VirtualRegister(data->machineStart.offset() + i)));
        }
        break;
    }

    case PhantomCreateRest:
    case PhantomDirectArguments:
    case PhantomClonedArguments: {
        InlineCallFrame* inlineCallFrame = node->origin.semantic.inlineCallFrame;
        if (!inlineCallFrame) {
            // We don't need to record anything about how the arguments are to be recovered. It's just a
            // given that we can read them from the stack.
            break;
        }

        unsigned numberOfArgumentsToSkip = 0;
        if (node->op() == PhantomCreateRest)
            numberOfArgumentsToSkip = node->numberOfArgumentsToSkip();

        if (inlineCallFrame->isVarargs()) {
            // Record how to read each argument and the argument count.
            Availability argumentCount =
                m_availability.m_locals.operand(inlineCallFrame->stackOffset + CallFrameSlot::argumentCount);

            m_availability.m_heap.set(PromotedHeapLocation(ArgumentCountPLoc, node), argumentCount);
        }

        if (inlineCallFrame->isClosureCall) {
            Availability callee = m_availability.m_locals.operand(
                inlineCallFrame->stackOffset + CallFrameSlot::callee);
            m_availability.m_heap.set(PromotedHeapLocation(ArgumentsCalleePLoc, node), callee);
        }

        for (unsigned i = numberOfArgumentsToSkip; i < inlineCallFrame->arguments.size() - 1; ++i) {
            Availability argument = m_availability.m_locals.operand(
                inlineCallFrame->stackOffset + CallFrame::argumentOffset(i));

            m_availability.m_heap.set(PromotedHeapLocation(ArgumentPLoc, node, i), argument);
        }
        break;
    }

    case PutHint: {
        m_availability.m_heap.set(
            PromotedHeapLocation(node->child1().node(), node->promotedLocationDescriptor()),
            Availability(node->child2().node()));
        break;
    }

    case PhantomSpread:
        m_availability.m_heap.set(PromotedHeapLocation(SpreadPLoc, node), Availability(node->child1().node()));
        break;

    case PhantomNewArrayWithSpread:
        for (unsigned i = 0; i < node->numChildren(); i++) {
            Node* child = m_graph.varArgChild(node, i).node();
            m_availability.m_heap.set(PromotedHeapLocation(NewArrayWithSpreadArgumentPLoc, node, i), Availability(child));
        }
        break;

    default:
        break;
    }
}
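Each case above edits a per-operand availability record: MovHint attaches the node that computes the value, PutStack/GetStack record a flushed stack location, and ZombieHint drops the node. Below is a toy model of that record; SketchAvailability and its members are invented, not the DFG's actual Availability class.

#include <cassert>

struct SketchAvailability {
    const void* node = nullptr; // the node that computed the value, if still known
    int flushedOffset = 0;      // the stack slot it was flushed to, if any
    bool flushed = false;

    void setNode(const void* n) { node = n; }             // MovHint-style update
    void setNodeUnavailable() { node = nullptr; }         // ZombieHint-style update
    void setFlush(int offset) { flushed = true; flushedOffset = offset; } // PutStack-style
    bool recoverable() const { return node || flushed; }
};

int main()
{
    SketchAvailability a;
    int dummyNode = 0;
    a.setNode(&dummyNode); // the value lives in a node...
    a.setFlush(-3);        // ...and was also spilled to local -3
    assert(a.recoverable());
    a.setNodeUnavailable(); // the node dies; the stack copy still suffices
    assert(a.recoverable());
    return 0;
}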
Example #23
static void fixFunctionBasedOnStackMaps(
    State& state, CodeBlock* codeBlock, JITCode* jitCode, GeneratedFunction generatedFunction,
    StackMaps::RecordMap& recordMap, bool didSeeUnwindInfo)
{
    Graph& graph = state.graph;
    VM& vm = graph.m_vm;
    StackMaps stackmaps = jitCode->stackmaps;
    
    int localsOffset =
        offsetOfStackRegion(recordMap, state.capturedStackmapID) + graph.m_nextMachineLocal;
    
    int varargsSpillSlotsOffset;
    if (state.varargsSpillSlotsStackmapID != UINT_MAX)
        varargsSpillSlotsOffset = offsetOfStackRegion(recordMap, state.varargsSpillSlotsStackmapID);
    else
        varargsSpillSlotsOffset = 0;
    
    for (unsigned i = graph.m_inlineVariableData.size(); i--;) {
        InlineCallFrame* inlineCallFrame = graph.m_inlineVariableData[i].inlineCallFrame;
        
        if (inlineCallFrame->argumentsRegister.isValid())
            inlineCallFrame->argumentsRegister += localsOffset;
        
        if (inlineCallFrame->argumentCountRegister.isValid())
            inlineCallFrame->argumentCountRegister += localsOffset;
        
        for (unsigned argument = inlineCallFrame->arguments.size(); argument-- > 1;) {
            inlineCallFrame->arguments[argument] =
                inlineCallFrame->arguments[argument].withLocalsOffset(localsOffset);
        }
        
        if (inlineCallFrame->isClosureCall) {
            inlineCallFrame->calleeRecovery =
                inlineCallFrame->calleeRecovery.withLocalsOffset(localsOffset);
        }
    }
    
    if (codeBlock->usesArguments()) {
        codeBlock->setArgumentsRegister(
            VirtualRegister(codeBlock->argumentsRegister().offset() + localsOffset));
    }

    MacroAssembler::Label stackOverflowException;

    {
        CCallHelpers checkJIT(&vm, codeBlock);
        
        // At this point it's perfectly fair to just blow away all state and restore the
        // JS JIT view of the universe.
        checkJIT.move(MacroAssembler::TrustedImmPtr(&vm), GPRInfo::argumentGPR0);
        checkJIT.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
        MacroAssembler::Call callLookupExceptionHandler = checkJIT.call();
        checkJIT.jumpToExceptionHandler();

        stackOverflowException = checkJIT.label();
        checkJIT.move(MacroAssembler::TrustedImmPtr(&vm), GPRInfo::argumentGPR0);
        checkJIT.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
        MacroAssembler::Call callLookupExceptionHandlerFromCallerFrame = checkJIT.call();
        checkJIT.jumpToExceptionHandler();

        auto linkBuffer = std::make_unique<LinkBuffer>(
            vm, checkJIT, codeBlock, JITCompilationMustSucceed);
        linkBuffer->link(callLookupExceptionHandler, FunctionPtr(lookupExceptionHandler));
        linkBuffer->link(callLookupExceptionHandlerFromCallerFrame, FunctionPtr(lookupExceptionHandlerFromCallerFrame));

        state.finalizer->handleExceptionsLinkBuffer = WTF::move(linkBuffer);
    }

    ExitThunkGenerator exitThunkGenerator(state);
    exitThunkGenerator.emitThunks();
    if (exitThunkGenerator.didThings()) {
        RELEASE_ASSERT(state.finalizer->osrExit.size());
        RELEASE_ASSERT(didSeeUnwindInfo);
        
        auto linkBuffer = std::make_unique<LinkBuffer>(
            vm, exitThunkGenerator, codeBlock, JITCompilationMustSucceed);
        
        RELEASE_ASSERT(state.finalizer->osrExit.size() == state.jitCode->osrExit.size());
        
        for (unsigned i = 0; i < state.jitCode->osrExit.size(); ++i) {
            OSRExitCompilationInfo& info = state.finalizer->osrExit[i];
            OSRExit& exit = jitCode->osrExit[i];
            
            if (verboseCompilationEnabled())
                dataLog("Handling OSR stackmap #", exit.m_stackmapID, " for ", exit.m_codeOrigin, "\n");

            auto iter = recordMap.find(exit.m_stackmapID);
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }
            
            info.m_thunkAddress = linkBuffer->locationOf(info.m_thunkLabel);
            exit.m_patchableCodeOffset = linkBuffer->offsetOf(info.m_thunkJump);
            
            for (unsigned j = exit.m_values.size(); j--;) {
                ExitValue value = exit.m_values[j];
                if (!value.isInJSStackSomehow())
                    continue;
                if (!value.virtualRegister().isLocal())
                    continue;
                exit.m_values[j] = value.withVirtualRegister(
                    VirtualRegister(value.virtualRegister().offset() + localsOffset));
            }
            
            if (verboseCompilationEnabled()) {
                DumpContext context;
                dataLog("    Exit values: ", inContext(exit.m_values, &context), "\n");
            }
        }
        
        state.finalizer->exitThunksLinkBuffer = WTF::move(linkBuffer);
    }

    if (!state.getByIds.isEmpty() || !state.putByIds.isEmpty() || !state.checkIns.isEmpty()) {
        CCallHelpers slowPathJIT(&vm, codeBlock);
        
        CCallHelpers::JumpList exceptionTarget;
        
        for (unsigned i = state.getByIds.size(); i--;) {
            GetByIdDescriptor& getById = state.getByIds[i];
            
            if (verboseCompilationEnabled())
                dataLog("Handling GetById stackmap #", getById.stackmapID(), "\n");
            
            auto iter = recordMap.find(getById.stackmapID());
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }
            
            for (unsigned i = 0; i < iter->value.size(); ++i) {
                StackMaps::Record& record = iter->value[i];
            
                RegisterSet usedRegisters = usedRegistersFor(record);
                
                GPRReg result = record.locations[0].directGPR();
                GPRReg base = record.locations[1].directGPR();
                
                JITGetByIdGenerator gen(
                    codeBlock, getById.codeOrigin(), usedRegisters, JSValueRegs(base),
                    JSValueRegs(result), NeedToSpill);
                
                MacroAssembler::Label begin = slowPathJIT.label();

                MacroAssembler::Call call = callOperation(
                    state, usedRegisters, slowPathJIT, getById.codeOrigin(), &exceptionTarget,
                    operationGetByIdOptimize, result, gen.stubInfo(), base, getById.uid());

                gen.reportSlowPathCall(begin, call);

                getById.m_slowPathDone.append(slowPathJIT.jump());
                getById.m_generators.append(gen);
            }
        }
        
        for (unsigned i = state.putByIds.size(); i--;) {
            PutByIdDescriptor& putById = state.putByIds[i];
            
            if (verboseCompilationEnabled())
                dataLog("Handling PutById stackmap #", putById.stackmapID(), "\n");
            
            auto iter = recordMap.find(putById.stackmapID());
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }
            
            for (unsigned i = 0; i < iter->value.size(); ++i) {
                StackMaps::Record& record = iter->value[i];
                
                RegisterSet usedRegisters = usedRegistersFor(record);
                
                GPRReg base = record.locations[0].directGPR();
                GPRReg value = record.locations[1].directGPR();
                
                JITPutByIdGenerator gen(
                    codeBlock, putById.codeOrigin(), usedRegisters, JSValueRegs(base),
                    JSValueRegs(value), GPRInfo::patchpointScratchRegister, NeedToSpill,
                    putById.ecmaMode(), putById.putKind());
                
                MacroAssembler::Label begin = slowPathJIT.label();
                
                MacroAssembler::Call call = callOperation(
                    state, usedRegisters, slowPathJIT, putById.codeOrigin(), &exceptionTarget,
                    gen.slowPathFunction(), gen.stubInfo(), value, base, putById.uid());
                
                gen.reportSlowPathCall(begin, call);
                
                putById.m_slowPathDone.append(slowPathJIT.jump());
                putById.m_generators.append(gen);
            }
        }

        for (unsigned i = state.checkIns.size(); i--;) {
            CheckInDescriptor& checkIn = state.checkIns[i];
            
            if (verboseCompilationEnabled())
                dataLog("Handling checkIn stackmap #", checkIn.stackmapID(), "\n");
            
            auto iter = recordMap.find(checkIn.stackmapID());
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }
            
            for (unsigned i = 0; i < iter->value.size(); ++i) {
                StackMaps::Record& record = iter->value[i];
                RegisterSet usedRegisters = usedRegistersFor(record);
                GPRReg result = record.locations[0].directGPR();
                GPRReg obj = record.locations[1].directGPR();
                StructureStubInfo* stubInfo = codeBlock->addStubInfo(); 
                stubInfo->codeOrigin = checkIn.codeOrigin();
                stubInfo->patch.baseGPR = static_cast<int8_t>(obj);
                stubInfo->patch.valueGPR = static_cast<int8_t>(result);
                stubInfo->patch.usedRegisters = usedRegisters;
                stubInfo->patch.spillMode = NeedToSpill;

                MacroAssembler::Label begin = slowPathJIT.label();

                MacroAssembler::Call slowCall = callOperation(
                    state, usedRegisters, slowPathJIT, checkIn.codeOrigin(), &exceptionTarget,
                    operationInOptimize, result, stubInfo, obj, checkIn.m_id);

                checkIn.m_slowPathDone.append(slowPathJIT.jump());
                
                checkIn.m_generators.append(CheckInGenerator(stubInfo, slowCall, begin));
            }
        }
        
        exceptionTarget.link(&slowPathJIT);
        MacroAssembler::Jump exceptionJump = slowPathJIT.jump();
        
        state.finalizer->sideCodeLinkBuffer = std::make_unique<LinkBuffer>(vm, slowPathJIT, codeBlock, JITCompilationMustSucceed);
        state.finalizer->sideCodeLinkBuffer->link(
            exceptionJump, state.finalizer->handleExceptionsLinkBuffer->entrypoint());
        
        for (unsigned i = state.getByIds.size(); i--;) {
            generateICFastPath(
                state, codeBlock, generatedFunction, recordMap, state.getByIds[i],
                sizeOfGetById());
        }
        for (unsigned i = state.putByIds.size(); i--;) {
            generateICFastPath(
                state, codeBlock, generatedFunction, recordMap, state.putByIds[i],
                sizeOfPutById());
        }

        for (unsigned i = state.checkIns.size(); i--;) {
            generateCheckInICFastPath(
                state, codeBlock, generatedFunction, recordMap, state.checkIns[i],
                sizeOfIn()); 
        } 
    }
    
    adjustCallICsForStackmaps(state.jsCalls, recordMap);
    
    for (unsigned i = state.jsCalls.size(); i--;) {
        JSCall& call = state.jsCalls[i];

        CCallHelpers fastPathJIT(&vm, codeBlock);
        call.emit(fastPathJIT);
        
        char* startOfIC = bitwise_cast<char*>(generatedFunction) + call.m_instructionOffset;
        
        LinkBuffer linkBuffer(vm, fastPathJIT, startOfIC, sizeOfCall());
        if (!linkBuffer.isValid()) {
            dataLog("Failed to insert inline cache for call because we thought the size would be ", sizeOfCall(), " but it ended up being ", fastPathJIT.m_assembler.codeSize(), " prior to compaction.\n");
            RELEASE_ASSERT_NOT_REACHED();
        }
        
        MacroAssembler::AssemblerType_T::fillNops(
            startOfIC + linkBuffer.size(), sizeOfCall() - linkBuffer.size());
        
        call.link(vm, linkBuffer);
    }
    
    adjustCallICsForStackmaps(state.jsCallVarargses, recordMap);
    
    for (unsigned i = state.jsCallVarargses.size(); i--;) {
        JSCallVarargs& call = state.jsCallVarargses[i];
        
        CCallHelpers fastPathJIT(&vm, codeBlock);
        call.emit(fastPathJIT, graph, varargsSpillSlotsOffset);
        
        char* startOfIC = bitwise_cast<char*>(generatedFunction) + call.m_instructionOffset;
        size_t sizeOfIC = sizeOfICFor(call.node());

        LinkBuffer linkBuffer(vm, fastPathJIT, startOfIC, sizeOfIC);
        if (!linkBuffer.isValid()) {
            dataLog("Failed to insert inline cache for varargs call (specifically, ", Graph::opName(call.node()->op()), ") because we thought the size would be ", sizeOfIC, " but it ended up being ", fastPathJIT.m_assembler.codeSize(), " prior to compaction.\n");
            RELEASE_ASSERT_NOT_REACHED();
        }
        
        MacroAssembler::AssemblerType_T::fillNops(
            startOfIC + linkBuffer.size(), sizeOfIC - linkBuffer.size());
        
        call.link(vm, linkBuffer, state.finalizer->handleExceptionsLinkBuffer->entrypoint());
    }
    
    RepatchBuffer repatchBuffer(codeBlock);

    auto iter = recordMap.find(state.handleStackOverflowExceptionStackmapID);
    // It's sort of remotely possible that we won't have an in-band exception handling
    // path, for some kinds of functions.
    if (iter != recordMap.end()) {
        for (unsigned i = iter->value.size(); i--;) {
            StackMaps::Record& record = iter->value[i];
            
            CodeLocationLabel source = CodeLocationLabel(
                bitwise_cast<char*>(generatedFunction) + record.instructionOffset);

            RELEASE_ASSERT(stackOverflowException.isSet());

            repatchBuffer.replaceWithJump(source, state.finalizer->handleExceptionsLinkBuffer->locationOf(stackOverflowException));
        }
    }
    
    iter = recordMap.find(state.handleExceptionStackmapID);
    // It's sort of remotely possible that we won't have an in-band exception handling
    // path, for some kinds of functions.
    if (iter != recordMap.end()) {
        for (unsigned i = iter->value.size(); i--;) {
            StackMaps::Record& record = iter->value[i];
            
            CodeLocationLabel source = CodeLocationLabel(
                bitwise_cast<char*>(generatedFunction) + record.instructionOffset);
            
            repatchBuffer.replaceWithJump(source, state.finalizer->handleExceptionsLinkBuffer->entrypoint());
        }
    }
    
    for (unsigned exitIndex = 0; exitIndex < jitCode->osrExit.size(); ++exitIndex) {
        OSRExitCompilationInfo& info = state.finalizer->osrExit[exitIndex];
        OSRExit& exit = jitCode->osrExit[exitIndex];
        iter = recordMap.find(exit.m_stackmapID);
        
        Vector<const void*> codeAddresses;
        
        if (iter != recordMap.end()) {
            for (unsigned i = iter->value.size(); i--;) {
                StackMaps::Record& record = iter->value[i];
                
                CodeLocationLabel source = CodeLocationLabel(
                    bitwise_cast<char*>(generatedFunction) + record.instructionOffset);
                
                codeAddresses.append(bitwise_cast<char*>(generatedFunction) + record.instructionOffset + MacroAssembler::maxJumpReplacementSize());
                
                if (info.m_isInvalidationPoint)
                    jitCode->common.jumpReplacements.append(JumpReplacement(source, info.m_thunkAddress));
                else
                    repatchBuffer.replaceWithJump(source, info.m_thunkAddress);
            }
        }
        
        if (graph.compilation())
            graph.compilation()->addOSRExitSite(codeAddresses);
    }
}
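A recurring move in this function is rebasing everything expressed in DFG locals by localsOffset once the stackmap reveals where the locals region actually landed. A small sketch of that rebasing, assuming JSC's convention that locals have negative offsets; SketchVirtualRegister and withLocalsOffset are invented stand-ins for VirtualRegister and ExitValue::withVirtualRegister.

#include <iostream>
#include <vector>

struct SketchVirtualRegister {
    int offset;
    bool isLocal() const { return offset < 0; } // arguments sit at non-negative offsets
};

static SketchVirtualRegister withLocalsOffset(SketchVirtualRegister reg, int localsOffset)
{
    if (reg.isLocal())
        reg.offset += localsOffset; // slide the local into its final machine slot
    return reg;
}

int main()
{
    int localsOffset = -10; // hypothetical offset of the locals stack region
    std::vector<SketchVirtualRegister> exitValues = { { -1 }, { -4 }, { 2 } };
    for (SketchVirtualRegister& reg : exitValues)
        reg = withLocalsOffset(reg, localsOffset);
    for (SketchVirtualRegister reg : exitValues)
        std::cout << reg.offset << "\n"; // -11, -14, 2 (the argument is untouched)
    return 0;
}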
    bool run()
    {
        ScoreBoard scoreBoard(m_graph.m_nextMachineLocal);
        scoreBoard.assertClear();
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
        bool needsNewLine = false;
#endif
        for (size_t blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
            BasicBlock* block = m_graph.block(blockIndex);
            if (!block)
                continue;
            if (!block->isReachable)
                continue;
            for (size_t indexInBlock = 0; indexInBlock < block->size(); ++indexInBlock) {
                Node* node = block->at(indexInBlock);
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
                if (needsNewLine)
                    dataLogF("\n");
                dataLogF("   @%u:", node->index());
                needsNewLine = true;
#endif
        
                if (!node->shouldGenerate())
                    continue;
                
                switch (node->op()) {
                case Phi:
                case Flush:
                case PhantomLocal:
                    continue;
                case GetLocal:
                    ASSERT(!node->child1()->hasResult());
                    break;
                default:
                    break;
                }
                
                // First, call use on all of the current node's children, then
                // allocate a VirtualRegister for this node. We do it in this
                // order so that if a child reaches its last use here, its
                // freed VirtualRegister can be reused for this node.
                if (node->flags() & NodeHasVarArgs) {
                    for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++)
                        scoreBoard.useIfHasResult(m_graph.m_varArgChildren[childIdx]);
                } else {
                    scoreBoard.useIfHasResult(node->child1());
                    scoreBoard.useIfHasResult(node->child2());
                    scoreBoard.useIfHasResult(node->child3());
                }

                if (!node->hasResult())
                    continue;

                VirtualRegister virtualRegister = scoreBoard.allocate();
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
                dataLogF(
                    " Assigning virtual register %u to node %u.",
                    virtualRegister.offset(), node->index());
#endif
                node->setVirtualRegister(virtualRegister);
                // 'mustGenerate' nodes have their useCount artificially elevated;
                // call use() now to account for this.
                if (node->mustGenerate())
                    scoreBoard.use(node);
            }
            scoreBoard.assertClear();
        }
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
        if (needsNewLine)
            dataLogF("\n");
#endif
        
        // Record the number of virtual registers we're using. This is used by calls
        // to figure out where to put the parameters.
        m_graph.m_nextMachineLocal = scoreBoard.highWatermark();

        // 'm_numCalleeRegisters' is the number of locals and temporaries allocated
        // for the function (and checked for on entry). Since we perform a new and
        // different allocation of temporaries, more registers may now be required.
        // This also accounts for the number of temporaries that may be needed if we
        // OSR exit, due to inlining. Hence this computes the number of temporaries
        // that could be used by this code block even if it exits; it may be more
        // than what this code block needs if it never exits.
        unsigned calleeRegisters = scoreBoard.highWatermark() + m_graph.m_parameterSlots;
        for (InlineCallFrameSet::iterator iter = m_graph.m_inlineCallFrames->begin(); !!iter; ++iter) {
            InlineCallFrame* inlineCallFrame = *iter;
            CodeBlock* codeBlock = baselineCodeBlockForInlineCallFrame(inlineCallFrame);
            unsigned requiredCalleeRegisters = VirtualRegister(inlineCallFrame->stackOffset).toLocal() + 1 + codeBlock->m_numCalleeRegisters;
            if (requiredCalleeRegisters > calleeRegisters)
                calleeRegisters = requiredCalleeRegisters;
        }
        if ((unsigned)codeBlock()->m_numCalleeRegisters < calleeRegisters)
            codeBlock()->m_numCalleeRegisters = calleeRegisters;
#if DFG_ENABLE(DEBUG_VERBOSE)
        dataLogF("Num callee registers: %u\n", calleeRegisters);
#endif
        
        return true;
    }
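The allocation loop above leans entirely on the ScoreBoard: use() on a child's last use frees its register so allocate() can hand the same slot to the current node, and highWatermark() reports how many slots were ever live at once. A minimal free-list sketch of that discipline; SketchScoreBoard is invented, and unlike the real ScoreBoard it takes an explicit use count instead of reading it off a Node.

#include <algorithm>
#include <cassert>
#include <vector>

class SketchScoreBoard {
public:
    unsigned allocate(unsigned uses)
    {
        unsigned reg;
        if (!m_free.empty()) {
            reg = m_free.back(); // prefer recycling a freed register
            m_free.pop_back();
        } else {
            reg = static_cast<unsigned>(m_refCounts.size());
            m_refCounts.push_back(0);
        }
        m_refCounts[reg] = uses;
        m_highWatermark = std::max<unsigned>(
            m_highWatermark, static_cast<unsigned>(m_refCounts.size()));
        return reg;
    }

    void use(unsigned reg)
    {
        assert(m_refCounts[reg]);
        if (!--m_refCounts[reg])
            m_free.push_back(reg); // last use: the register becomes reusable
    }

    unsigned highWatermark() const { return m_highWatermark; }

private:
    std::vector<unsigned> m_refCounts;
    std::vector<unsigned> m_free;
    unsigned m_highWatermark = 0;
};

int main()
{
    SketchScoreBoard board;
    unsigned a = board.allocate(1); // one pending use
    board.use(a);                   // last use frees the slot...
    unsigned b = board.allocate(1); // ...so the next node reuses it
    assert(a == b);
    assert(board.highWatermark() == 1);
    return 0;
}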
    void handleBlockForTryCatch(BasicBlock* block, InsertionSet& insertionSet)
    {
        HandlerInfo* currentExceptionHandler = nullptr;
        FastBitVector liveAtCatchHead;
        liveAtCatchHead.resize(m_graph.block(0)->variablesAtTail.numberOfLocals());

        HandlerInfo* cachedHandlerResult;
        CodeOrigin cachedCodeOrigin;
        auto catchHandler = [&] (CodeOrigin origin) -> HandlerInfo* {
            ASSERT(origin);
            if (origin == cachedCodeOrigin)
                return cachedHandlerResult;

            unsigned bytecodeIndexToCheck = origin.bytecodeIndex;

            cachedCodeOrigin = origin;

            while (1) {
                InlineCallFrame* inlineCallFrame = origin.inlineCallFrame;
                CodeBlock* codeBlock = m_graph.baselineCodeBlockFor(inlineCallFrame);
                if (HandlerInfo* handler = codeBlock->handlerForBytecodeOffset(bytecodeIndexToCheck)) {
                    liveAtCatchHead.clearAll();

                    unsigned catchBytecodeIndex = handler->target;
                    m_graph.forAllLocalsLiveInBytecode(CodeOrigin(catchBytecodeIndex, inlineCallFrame), [&] (VirtualRegister operand) {
                        liveAtCatchHead[operand.toLocal()] = true;
                    });

                    cachedHandlerResult = handler;
                    break;
                }

                if (!inlineCallFrame) {
                    cachedHandlerResult = nullptr;
                    break;
                }

                bytecodeIndexToCheck = inlineCallFrame->directCaller.bytecodeIndex;
                origin = inlineCallFrame->directCaller;
            }

            return cachedHandlerResult;
        };

        Operands<VariableAccessData*> currentBlockAccessData(block->variablesAtTail.numberOfArguments(), block->variablesAtTail.numberOfLocals(), nullptr);
        HashSet<InlineCallFrame*> seenInlineCallFrames;

        auto flushEverything = [&] (NodeOrigin origin, unsigned index) {
            RELEASE_ASSERT(currentExceptionHandler);
            auto flush = [&] (VirtualRegister operand, bool alwaysInsert) {
                if ((operand.isLocal() && liveAtCatchHead[operand.toLocal()]) 
                    || operand.isArgument()
                    || alwaysInsert) {

                    ASSERT(isValidFlushLocation(block, index, operand));

                    VariableAccessData* accessData = currentBlockAccessData.operand(operand);
                    if (!accessData)
                        accessData = newVariableAccessData(operand);

                    currentBlockAccessData.operand(operand) = accessData;

                    insertionSet.insertNode(index, SpecNone, 
                        Flush, origin, OpInfo(accessData));
                }
            };

            for (unsigned local = 0; local < block->variablesAtTail.numberOfLocals(); local++)
                flush(virtualRegisterForLocal(local), false);
            for (InlineCallFrame* inlineCallFrame : seenInlineCallFrames)
                flush(VirtualRegister(inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset()), true);
            flush(VirtualRegister(CallFrame::thisArgumentOffset()), true);

            seenInlineCallFrames.clear();
        };

        for (unsigned nodeIndex = 0; nodeIndex < block->size(); nodeIndex++) {
            Node* node = block->at(nodeIndex);

            {
                HandlerInfo* newHandler = catchHandler(node->origin.semantic);
                if (newHandler != currentExceptionHandler && currentExceptionHandler)
                    flushEverything(node->origin, nodeIndex);
                currentExceptionHandler = newHandler;
            }

            if (currentExceptionHandler && (node->op() == SetLocal || node->op() == SetArgument)) {
                InlineCallFrame* inlineCallFrame = node->origin.semantic.inlineCallFrame;
                if (inlineCallFrame)
                    seenInlineCallFrames.add(inlineCallFrame);
                VirtualRegister operand = node->local();

                int stackOffset = inlineCallFrame ? inlineCallFrame->stackOffset : 0;
                if ((operand.isLocal() && liveAtCatchHead[operand.toLocal()])
                    || operand.isArgument()
                    || (operand.offset() == stackOffset + CallFrame::thisArgumentOffset())) {

                    ASSERT(isValidFlushLocation(block, nodeIndex, operand));

                    VariableAccessData* variableAccessData = currentBlockAccessData.operand(operand);
                    if (!variableAccessData)
                        variableAccessData = newVariableAccessData(operand);

                    insertionSet.insertNode(nodeIndex, SpecNone, 
                        Flush, node->origin, OpInfo(variableAccessData));
                }
            }

            if (node->accessesStack(m_graph))
                currentBlockAccessData.operand(node->local()) = node->variableAccessData();
        }

        if (currentExceptionHandler) {
            NodeOrigin origin = block->at(block->size() - 1)->origin;
            flushEverything(origin, block->size());
        }
    }
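The flush policy in this pass boils down to one predicate: inside a try region, flush a local only if the catch handler can observe it, but always flush arguments and each inlined frame's |this| slot. A standalone sketch of that predicate, using JSC's convention that locals have negative offsets; SketchOperand and shouldFlush are invented stand-ins for VirtualRegister and the flush lambda above.

#include <iostream>
#include <vector>

struct SketchOperand {
    int offset; // < 0: local, >= 0: argument
    bool isLocal() const { return offset < 0; }
    int toLocal() const { return -1 - offset; } // local -1 -> index 0, and so on
};

static bool shouldFlush(SketchOperand op, const std::vector<bool>& liveAtCatchHead,
    bool alwaysInsert)
{
    if (alwaysInsert) // e.g. an inlined frame's |this| slot
        return true;
    if (!op.isLocal()) // arguments are always flushed
        return true;
    return liveAtCatchHead[op.toLocal()]; // locals: only if the catch reads them
}

int main()
{
    std::vector<bool> liveAtCatchHead = { true, false, false };
    std::cout << shouldFlush({ -1 }, liveAtCatchHead, false)  // live local -> 1
              << shouldFlush({ -2 }, liveAtCatchHead, false)  // dead local -> 0
              << shouldFlush({ 3 }, liveAtCatchHead, false)   // argument -> 1
              << "\n";
    return 0;
}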
Example #26
void VariableEventStream::reconstruct(
    CodeBlock* codeBlock, CodeOrigin codeOrigin, MinifiedGraph& graph,
    unsigned index, Operands<ValueRecovery>& valueRecoveries) const
{
    ASSERT(codeBlock->jitType() == JITCode::DFGJIT);
    CodeBlock* baselineCodeBlock = codeBlock->baselineVersion();
    
    unsigned numVariables;
    if (codeOrigin.inlineCallFrame)
        numVariables = baselineCodeBlockForInlineCallFrame(codeOrigin.inlineCallFrame)->m_numCalleeRegisters + VirtualRegister(codeOrigin.inlineCallFrame->stackOffset).toLocal() + 1;
    else
        numVariables = baselineCodeBlock->m_numCalleeRegisters;
    
    // Crazy special case: if we're at index == 0 then this must be an argument check
    // failure, in which case all variables are already set up. The recoveries should
    // reflect this.
    if (!index) {
        valueRecoveries = Operands<ValueRecovery>(codeBlock->numParameters(), numVariables);
        for (size_t i = 0; i < valueRecoveries.size(); ++i) {
            valueRecoveries[i] = ValueRecovery::displacedInJSStack(
                VirtualRegister(valueRecoveries.operandForIndex(i)), DataFormatJS);
        }
        return;
    }
    
    // Step 1: Find the last checkpoint, and figure out the number of virtual registers as we go.
    unsigned startIndex = index - 1;
    while (at(startIndex).kind() != Reset)
        startIndex--;
    
    // Step 2: Create a mock-up of the DFG's state and execute the events.
    Operands<ValueSource> operandSources(codeBlock->numParameters(), numVariables);
    for (unsigned i = operandSources.size(); i--;)
        operandSources[i] = ValueSource(SourceIsDead);
    HashMap<MinifiedID, MinifiedGenerationInfo> generationInfos;
    for (unsigned i = startIndex; i < index; ++i) {
        const VariableEvent& event = at(i);
        switch (event.kind()) {
        case Reset:
            // nothing to do.
            break;
        case BirthToFill:
        case BirthToSpill:
        case Birth: {
            MinifiedGenerationInfo info;
            info.update(event);
            generationInfos.add(event.id(), info);
            break;
        }
        case Fill:
        case Spill:
        case Death: {
            HashMap<MinifiedID, MinifiedGenerationInfo>::iterator iter = generationInfos.find(event.id());
            ASSERT(iter != generationInfos.end());
            iter->value.update(event);
            break;
        }
        case MovHintEvent:
            if (operandSources.hasOperand(event.bytecodeRegister()))
                operandSources.setOperand(event.bytecodeRegister(), ValueSource(event.id()));
            break;
        case SetLocalEvent:
            if (operandSources.hasOperand(event.bytecodeRegister()))
                operandSources.setOperand(event.bytecodeRegister(), ValueSource::forDataFormat(event.machineRegister(), event.dataFormat()));
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
    }
    
    // Step 3: Compute value recoveries!
    valueRecoveries = Operands<ValueRecovery>(codeBlock->numParameters(), numVariables);
    for (unsigned i = 0; i < operandSources.size(); ++i) {
        ValueSource& source = operandSources[i];
        if (source.isTriviallyRecoverable()) {
            valueRecoveries[i] = source.valueRecovery();
            continue;
        }
        
        ASSERT(source.kind() == HaveNode);
        MinifiedNode* node = graph.at(source.id());
        MinifiedGenerationInfo info = generationInfos.get(source.id());
        if (!info.alive) {
            valueRecoveries[i] = ValueRecovery::constant(jsUndefined());
            continue;
        }

        if (tryToSetConstantRecovery(valueRecoveries[i], node))
            continue;
        
        ASSERT(info.format != DataFormatNone);
        
        if (info.filled) {
            if (info.format == DataFormatDouble) {
                valueRecoveries[i] = ValueRecovery::inFPR(info.u.fpr, DataFormatDouble);
                continue;
            }
#if USE(JSVALUE32_64)
            if (info.format & DataFormatJS) {
                valueRecoveries[i] = ValueRecovery::inPair(info.u.pair.tagGPR, info.u.pair.payloadGPR);
                continue;
            }
#endif
            valueRecoveries[i] = ValueRecovery::inGPR(info.u.gpr, info.format);
            continue;
        }
        
        valueRecoveries[i] =
            ValueRecovery::displacedInJSStack(static_cast<VirtualRegister>(info.u.virtualReg), info.format);
    }
}
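The reconstruction above is an event-tape replay: rewind to the last Reset checkpoint, then apply each event forward to rebuild where every value currently lives. Here is a toy version of that replay; SketchEvent and replay are an invented simplification of JSC's VariableEvent stream, tracking only a spill slot per value.

#include <cstddef>
#include <iostream>
#include <map>
#include <vector>

enum SketchEventKind { SketchReset, SketchBirth, SketchSpill, SketchDeath };

struct SketchEvent {
    SketchEventKind kind;
    int id;        // which value the event concerns
    int stackSlot; // meaningful only for SketchSpill
};

// For each value id alive just before `index`, return the slot it was spilled
// to, or -1 if it only ever lived in registers.
static std::map<int, int> replay(const std::vector<SketchEvent>& tape, std::size_t index)
{
    std::size_t start = index;
    while (start && tape[start - 1].kind != SketchReset)
        start--; // find the last checkpoint, like "Step 1" above

    std::map<int, int> aliveToSlot;
    for (std::size_t i = start; i < index; ++i) {
        const SketchEvent& event = tape[i];
        switch (event.kind) {
        case SketchReset: break;
        case SketchBirth: aliveToSlot[event.id] = -1; break;
        case SketchSpill: aliveToSlot[event.id] = event.stackSlot; break;
        case SketchDeath: aliveToSlot.erase(event.id); break;
        }
    }
    return aliveToSlot;
}

int main()
{
    std::vector<SketchEvent> tape = {
        { SketchReset, 0, 0 },
        { SketchBirth, 7, 0 },
        { SketchSpill, 7, -3 },
        { SketchBirth, 9, 0 },
        { SketchDeath, 9, 0 },
    };
    std::map<int, int> state = replay(tape, tape.size());
    std::cout << "values alive: " << state.size()
              << ", value 7 spilled to slot " << state[7] << "\n"; // 1 and -3
    return 0;
}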
    bool run()
    {
        SharedSymbolTable* symbolTable = codeBlock()->symbolTable();

        // This enumerates the locals that we actually care about and packs them. So for example
        // if we use local 1, 3, 4, 5, 7, then we remap them: 1->0, 3->1, 4->2, 5->3, 7->4. We
        // treat a variable as being "used" if there exists an access to it (SetLocal, GetLocal,
        // Flush, PhantomLocal).
        
        BitVector usedLocals;
        
        // Collect those variables that are used from IR.
        bool hasGetLocalUnlinked = false;
        for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
            BasicBlock* block = m_graph.block(blockIndex);
            if (!block)
                continue;
            for (unsigned nodeIndex = block->size(); nodeIndex--;) {
                Node* node = block->at(nodeIndex);
                switch (node->op()) {
                case GetLocal:
                case SetLocal:
                case Flush:
                case PhantomLocal: {
                    VariableAccessData* variable = node->variableAccessData();
                    if (variable->local().isArgument())
                        break;
                    usedLocals.set(variable->local().toLocal());
                    break;
                }
                    
                case GetLocalUnlinked: {
                    VirtualRegister operand = node->unlinkedLocal();
                    if (operand.isArgument())
                        break;
                    usedLocals.set(operand.toLocal());
                    hasGetLocalUnlinked = true;
                    break;
                }
                    
                default:
                    break;
                }
            }
        }
        
        // Ensure that captured variables and captured inline arguments are pinned down.
        // They should already be pinned by flushes, but those flushes can be
        // optimized away.
        if (symbolTable) {
            for (int i = symbolTable->captureStart(); i > symbolTable->captureEnd(); i--)
                usedLocals.set(VirtualRegister(i).toLocal());
        }
        if (codeBlock()->usesArguments()) {
            usedLocals.set(codeBlock()->argumentsRegister().toLocal());
            usedLocals.set(unmodifiedArgumentsRegister(codeBlock()->argumentsRegister()).toLocal());
        }
        if (codeBlock()->uncheckedActivationRegister().isValid())
            usedLocals.set(codeBlock()->activationRegister().toLocal());
        for (InlineCallFrameSet::iterator iter = m_graph.m_inlineCallFrames->begin(); !!iter; ++iter) {
            InlineCallFrame* inlineCallFrame = *iter;
            if (!inlineCallFrame->executable->usesArguments())
                continue;
            
            VirtualRegister argumentsRegister = m_graph.argumentsRegisterFor(inlineCallFrame);
            usedLocals.set(argumentsRegister.toLocal());
            usedLocals.set(unmodifiedArgumentsRegister(argumentsRegister).toLocal());
            
            for (unsigned argument = inlineCallFrame->arguments.size(); argument-- > 1;) {
                usedLocals.set(VirtualRegister(
                    virtualRegisterForArgument(argument).offset() +
                    inlineCallFrame->stackOffset).toLocal());
            }
        }
        
        Vector<unsigned> allocation(usedLocals.size());
        m_graph.m_nextMachineLocal = 0;
        for (unsigned i = 0; i < usedLocals.size(); ++i) {
            if (!usedLocals.get(i)) {
                allocation[i] = UINT_MAX;
                continue;
            }
            
            allocation[i] = m_graph.m_nextMachineLocal++;
        }
        
        for (unsigned i = m_graph.m_variableAccessData.size(); i--;) {
            VariableAccessData* variable = &m_graph.m_variableAccessData[i];
            if (!variable->isRoot())
                continue;
            
            if (variable->local().isArgument()) {
                variable->machineLocal() = variable->local();
                continue;
            }
            
            size_t local = variable->local().toLocal();
            if (local >= allocation.size())
                continue;
            
            if (allocation[local] == UINT_MAX)
                continue;
            
            variable->machineLocal() = virtualRegisterForLocal(
                allocation[variable->local().toLocal()]);
        }
        
        if (codeBlock()->usesArguments()) {
            VirtualRegister argumentsRegister = virtualRegisterForLocal(
                allocation[codeBlock()->argumentsRegister().toLocal()]);
            RELEASE_ASSERT(
                virtualRegisterForLocal(allocation[
                    unmodifiedArgumentsRegister(
                        codeBlock()->argumentsRegister()).toLocal()])
                == unmodifiedArgumentsRegister(argumentsRegister));
            codeBlock()->setArgumentsRegister(argumentsRegister);
        }
        
        if (codeBlock()->uncheckedActivationRegister().isValid()) {
            codeBlock()->setActivationRegister(
                virtualRegisterForLocal(allocation[codeBlock()->activationRegister().toLocal()]));
        }
        
        for (unsigned i = m_graph.m_inlineVariableData.size(); i--;) {
            InlineVariableData data = m_graph.m_inlineVariableData[i];
            InlineCallFrame* inlineCallFrame = data.inlineCallFrame;
            
            if (inlineCallFrame->executable->usesArguments()) {
                inlineCallFrame->argumentsRegister = virtualRegisterForLocal(
                    allocation[m_graph.argumentsRegisterFor(inlineCallFrame).toLocal()]);

                RELEASE_ASSERT(
                    virtualRegisterForLocal(allocation[unmodifiedArgumentsRegister(
                        m_graph.argumentsRegisterFor(inlineCallFrame)).toLocal()])
                    == unmodifiedArgumentsRegister(inlineCallFrame->argumentsRegister));
            }
            
            for (unsigned argument = inlineCallFrame->arguments.size(); argument-- > 1;) {
                ArgumentPosition& position = m_graph.m_argumentPositions[
                    data.argumentPositionStart + argument];
                VariableAccessData* variable = position.someVariable();
                ValueSource source;
                if (!variable)
                    source = ValueSource(SourceIsDead);
                else {
                    source = ValueSource::forFlushFormat(
                        variable->machineLocal(), variable->flushFormat());
                }
                inlineCallFrame->arguments[argument] = source.valueRecovery();
            }
            
            RELEASE_ASSERT(inlineCallFrame->isClosureCall == !!data.calleeVariable);
            if (inlineCallFrame->isClosureCall) {
                ValueSource source = ValueSource::forFlushFormat(
                    data.calleeVariable->machineLocal(),
                    data.calleeVariable->flushFormat());
                inlineCallFrame->calleeRecovery = source.valueRecovery();
            } else
                RELEASE_ASSERT(inlineCallFrame->calleeRecovery.isConstant());
        }
        
        if (symbolTable) {
            if (symbolTable->captureCount()) {
                unsigned captureStartLocal = allocation[
                    VirtualRegister(codeBlock()->symbolTable()->captureStart()).toLocal()];
                ASSERT(captureStartLocal != UINT_MAX);
                m_graph.m_machineCaptureStart = virtualRegisterForLocal(captureStartLocal).offset();
            } else
                m_graph.m_machineCaptureStart = virtualRegisterForLocal(0).offset();
        
            // This is an abomination. If we had captured an argument then the argument ends
            // up being "slow", meaning that loads of the argument go through an extra lookup
            // table.
            if (const SlowArgument* slowArguments = symbolTable->slowArguments()) {
                auto newSlowArguments = std::make_unique<SlowArgument[]>(
                    symbolTable->parameterCount());
                for (size_t i = symbolTable->parameterCount(); i--;) {
                    newSlowArguments[i] = slowArguments[i];
                    VirtualRegister reg = VirtualRegister(slowArguments[i].index);
                    if (reg.isLocal())
                        newSlowArguments[i].index = virtualRegisterForLocal(allocation[reg.toLocal()]).offset();
                }
            
                m_graph.m_slowArguments = std::move(newSlowArguments);
            }
        }
        
        // Fix GetLocalUnlinked's variable references.
        if (hasGetLocalUnlinked) {
            for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
                BasicBlock* block = m_graph.block(blockIndex);
                if (!block)
                    continue;
                for (unsigned nodeIndex = block->size(); nodeIndex--;) {
                    Node* node = block->at(nodeIndex);
                    switch (node->op()) {
                    case GetLocalUnlinked: {
                        VirtualRegister operand = node->unlinkedLocal();
                        if (operand.isLocal())
                            operand = virtualRegisterForLocal(allocation[operand.toLocal()]);
                        node->setUnlinkedMachineLocal(operand);
                        break;
                    }
                        
                    default:
                        break;
                    }
                }
            }
        }
        
        return true;
    }
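Both stack-layout variants in this collection share the same packing step: walk the used-locals bit set in order and hand out dense machine-local indices, leaving UINT_MAX holes for unused locals. A standalone sketch of just that step; packLocals is an invented helper, reproducing the 1->0, 3->1, 4->2, 5->3, 7->4 remapping described in the comment above.

#include <climits>
#include <cstddef>
#include <iostream>
#include <vector>

static std::vector<unsigned> packLocals(const std::vector<bool>& usedLocals,
    unsigned& nextMachineLocal)
{
    std::vector<unsigned> allocation(usedLocals.size(), UINT_MAX);
    nextMachineLocal = 0;
    for (std::size_t i = 0; i < usedLocals.size(); ++i) {
        if (usedLocals[i])
            allocation[i] = nextMachineLocal++; // dense assignment in local order
    }
    return allocation;
}

int main()
{
    // Locals 1, 3, 4, 5, 7 are in use.
    std::vector<bool> used = { false, true, false, true, true, true, false, true };
    unsigned nextMachineLocal = 0;
    std::vector<unsigned> allocation = packLocals(used, nextMachineLocal);
    for (std::size_t i = 0; i < allocation.size(); ++i) {
        if (allocation[i] != UINT_MAX)
            std::cout << i << "->" << allocation[i] << " ";
    }
    std::cout << "(" << nextMachineLocal << " machine locals)\n";
    return 0;
}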
Example #28
    bool run()
    {
        // This enumerates the locals that we actually care about and packs them. So for example
        // if we use local 1, 3, 4, 5, 7, then we remap them: 1->0, 3->1, 4->2, 5->3, 7->4. We
        // treat a variable as being "used" if there exists an access to it (SetLocal, GetLocal,
        // Flush, PhantomLocal).
        
        BitVector usedLocals;
        
        // Collect those variables that are used from IR.
        bool hasNodesThatNeedFixup = false;
        for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
            BasicBlock* block = m_graph.block(blockIndex);
            if (!block)
                continue;
            for (unsigned nodeIndex = block->size(); nodeIndex--;) {
                Node* node = block->at(nodeIndex);
                switch (node->op()) {
                case GetLocal:
                case SetLocal:
                case Flush:
                case PhantomLocal: {
                    VariableAccessData* variable = node->variableAccessData();
                    if (variable->local().isArgument())
                        break;
                    usedLocals.set(variable->local().toLocal());
                    break;
                }
                    
                case GetLocalUnlinked: {
                    VirtualRegister operand = node->unlinkedLocal();
                    if (operand.isArgument())
                        break;
                    usedLocals.set(operand.toLocal());
                    hasNodesThatNeedFixup = true;
                    break;
                }
                    
                case LoadVarargs:
                case ForwardVarargs: {
                    LoadVarargsData* data = node->loadVarargsData();
                    if (data->count.isLocal())
                        usedLocals.set(data->count.toLocal());
                    if (data->start.isLocal()) {
                        // This part really relies on the contiguity of stack layout
                        // assignments.
                        ASSERT(VirtualRegister(data->start.offset() + data->limit - 1).isLocal());
                        for (unsigned i = data->limit; i--;) 
                            usedLocals.set(VirtualRegister(data->start.offset() + i).toLocal());
                    } // the else case shouldn't happen.
                    hasNodesThatNeedFixup = true;
                    break;
                }
                    
                case PutStack:
                case GetStack: {
                    StackAccessData* stack = node->stackAccessData();
                    if (stack->local.isArgument())
                        break;
                    usedLocals.set(stack->local.toLocal());
                    break;
                }
                    
                default:
                    break;
                }
            }
        }
        
        for (InlineCallFrameSet::iterator iter = m_graph.m_plan.inlineCallFrames->begin(); !!iter; ++iter) {
            InlineCallFrame* inlineCallFrame = *iter;
            
            if (inlineCallFrame->isVarargs()) {
                usedLocals.set(VirtualRegister(
                    JSStack::ArgumentCount + inlineCallFrame->stackOffset).toLocal());
            }
            
            for (unsigned argument = inlineCallFrame->arguments.size(); argument-- > 1;) {
                usedLocals.set(VirtualRegister(
                    virtualRegisterForArgument(argument).offset() +
                    inlineCallFrame->stackOffset).toLocal());
            }
        }
        
        Vector<unsigned> allocation(usedLocals.size());
        m_graph.m_nextMachineLocal = 0;
        for (unsigned i = 0; i < usedLocals.size(); ++i) {
            if (!usedLocals.get(i)) {
                allocation[i] = UINT_MAX;
                continue;
            }
            
            allocation[i] = m_graph.m_nextMachineLocal++;
        }
        
        for (unsigned i = m_graph.m_variableAccessData.size(); i--;) {
            VariableAccessData* variable = &m_graph.m_variableAccessData[i];
            if (!variable->isRoot())
                continue;
            
            if (variable->local().isArgument()) {
                variable->machineLocal() = variable->local();
                continue;
            }
            
            size_t local = variable->local().toLocal();
            if (local >= allocation.size())
                continue;
            
            if (allocation[local] == UINT_MAX)
                continue;
            
            variable->machineLocal() = assign(allocation, variable->local());
        }
        
        for (StackAccessData* data : m_graph.m_stackAccessData) {
            if (!data->local.isLocal()) {
                data->machineLocal = data->local;
                continue;
            }
            
            if (static_cast<size_t>(data->local.toLocal()) >= allocation.size())
                continue;
            if (allocation[data->local.toLocal()] == UINT_MAX)
                continue;
            
            data->machineLocal = assign(allocation, data->local);
        }
        
        // This register is never valid for DFG code blocks.
        codeBlock()->setActivationRegister(VirtualRegister());
        if (LIKELY(!m_graph.hasDebuggerEnabled()))
            codeBlock()->setScopeRegister(VirtualRegister());
        else
            codeBlock()->setScopeRegister(assign(allocation, codeBlock()->scopeRegister()));

        for (unsigned i = m_graph.m_inlineVariableData.size(); i--;) {
            InlineVariableData data = m_graph.m_inlineVariableData[i];
            InlineCallFrame* inlineCallFrame = data.inlineCallFrame;
            
            if (inlineCallFrame->isVarargs()) {
                inlineCallFrame->argumentCountRegister = assign(
                    allocation, VirtualRegister(inlineCallFrame->stackOffset + JSStack::ArgumentCount));
            }
            
            for (unsigned argument = inlineCallFrame->arguments.size(); argument-- > 1;) {
                ArgumentPosition& position = m_graph.m_argumentPositions[
                    data.argumentPositionStart + argument];
                VariableAccessData* variable = position.someVariable();
                ValueSource source;
                if (!variable)
                    source = ValueSource(SourceIsDead);
                else {
                    source = ValueSource::forFlushFormat(
                        variable->machineLocal(), variable->flushFormat());
                }
                inlineCallFrame->arguments[argument] = source.valueRecovery();
            }
            
            RELEASE_ASSERT(inlineCallFrame->isClosureCall == !!data.calleeVariable);
            if (inlineCallFrame->isClosureCall) {
                VariableAccessData* variable = data.calleeVariable->find();
                ValueSource source = ValueSource::forFlushFormat(
                    variable->machineLocal(),
                    variable->flushFormat());
                inlineCallFrame->calleeRecovery = source.valueRecovery();
            } else
                RELEASE_ASSERT(inlineCallFrame->calleeRecovery.isConstant());
        }
        
        // Fix GetLocalUnlinked's variable references.
        if (hasNodesThatNeedFixup) {
            for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
                BasicBlock* block = m_graph.block(blockIndex);
                if (!block)
                    continue;
                for (unsigned nodeIndex = block->size(); nodeIndex--;) {
                    Node* node = block->at(nodeIndex);
                    switch (node->op()) {
                    case GetLocalUnlinked: {
                        node->setUnlinkedMachineLocal(assign(allocation, node->unlinkedLocal()));
                        break;
                    }
                        
                    case LoadVarargs:
                    case ForwardVarargs: {
                        LoadVarargsData* data = node->loadVarargsData();
                        data->machineCount = assign(allocation, data->count);
                        data->machineStart = assign(allocation, data->start);
                        break;
                    }
                        
                    default:
                        break;
                    }
                }
            }
        }
        
        return true;
    }
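A minimal standalone sketch of the packing step above, using only the standard library; packLocals and its inputs are illustrative stand-ins, not JSC types:

#include <climits>
#include <cstdio>
#include <vector>

// Builds the used-local -> machine-local map: used slots receive dense
// indices in order, unused slots get UINT_MAX, mirroring the `allocation`
// vector in run() above.
std::vector<unsigned> packLocals(const std::vector<bool>& usedLocals)
{
    std::vector<unsigned> allocation(usedLocals.size(), UINT_MAX);
    unsigned nextMachineLocal = 0;
    for (size_t i = 0; i < usedLocals.size(); ++i) {
        if (usedLocals[i])
            allocation[i] = nextMachineLocal++;
    }
    return allocation;
}

int main()
{
    // Locals 1, 3, 4, 5, 7 are used; they remap to 0, 1, 2, 3, 4.
    std::vector<bool> used = { false, true, false, true, true, true, false, true };
    std::vector<unsigned> allocation = packLocals(used);
    for (size_t i = 0; i < allocation.size(); ++i) {
        if (allocation[i] != UINT_MAX)
            std::printf("local %zu -> machine local %u\n", i, allocation[i]);
    }
    return 0;
}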
Example no. 29
    bool foldConstants(BasicBlock* block)
    {
        bool changed = false;
        m_state.beginBasicBlock(block);
        for (unsigned indexInBlock = 0; indexInBlock < block->size(); ++indexInBlock) {
            if (!m_state.isValid())
                break;
            
            Node* node = block->at(indexInBlock);

            bool alreadyHandled = false;
            bool eliminated = false;
                    
            switch (node->op()) {
            case BooleanToNumber: {
                if (node->child1().useKind() == UntypedUse
                    && !m_interpreter.needsTypeCheck(node->child1(), SpecBoolean))
                    node->child1().setUseKind(BooleanUse);
                break;
            }
                
            case CheckArgumentsNotCreated: {
                if (!isEmptySpeculation(
                        m_state.variables().operand(
                            m_graph.argumentsRegisterFor(node->origin.semantic)).m_type))
                    break;
                node->convertToPhantom();
                eliminated = true;
                break;
            }
                    
            case CheckStructure:
            case ArrayifyToStructure: {
                AbstractValue& value = m_state.forNode(node->child1());
                StructureSet set;
                if (node->op() == ArrayifyToStructure)
                    set = node->structure();
                else
                    set = node->structureSet();
                if (value.m_structure.isSubsetOf(set)) {
                    m_interpreter.execute(indexInBlock); // Catch the fact that we may filter on cell.
                    node->convertToPhantom();
                    eliminated = true;
                    break;
                }
                break;
            }
                
            case CheckArray:
            case Arrayify: {
                if (!node->arrayMode().alreadyChecked(m_graph, node, m_state.forNode(node->child1())))
                    break;
                node->convertToPhantom();
                eliminated = true;
                break;
            }
                
            case PutStructure: {
                if (m_state.forNode(node->child1()).m_structure.onlyStructure() != node->transition()->next)
                    break;
                
                node->convertToPhantom();
                eliminated = true;
                break;
            }
                
            case CheckFunction: {
                if (m_state.forNode(node->child1()).value() != node->function()->value())
                    break;
                node->convertToPhantom();
                eliminated = true;
                break;
            }
                
            case CheckInBounds: {
                JSValue left = m_state.forNode(node->child1()).value();
                JSValue right = m_state.forNode(node->child2()).value();
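                // One unsigned comparison covers both bounds: casting to
                // uint32_t maps negative indices to large values, so
                // uint32_t(left) < uint32_t(right) implies 0 <= left < right
                // whenever right is a non-negative length.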
                if (left && right && left.isInt32() && right.isInt32()
                    && static_cast<uint32_t>(left.asInt32()) < static_cast<uint32_t>(right.asInt32())) {
                    node->convertToPhantom();
                    eliminated = true;
                    break;
                }
                
                break;
            }
                
            case MultiGetByOffset: {
                Edge baseEdge = node->child1();
                Node* base = baseEdge.node();
                MultiGetByOffsetData& data = node->multiGetByOffsetData();

                // First prune the variants, then check if the MultiGetByOffset can be
                // strength-reduced to a GetByOffset.
                
                AbstractValue baseValue = m_state.forNode(base);
                
                m_interpreter.execute(indexInBlock); // Push CFA over this node after we get the state before.
                alreadyHandled = true; // Don't allow the default constant folder to do things to this.
                
                for (unsigned i = 0; i < data.variants.size(); ++i) {
                    GetByIdVariant& variant = data.variants[i];
                    variant.structureSet().filter(baseValue);
                    if (variant.structureSet().isEmpty()) {
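                        // Unordered erase: move the last variant into slot i,
                        // pop the tail, and step i back so the moved variant
                        // is filtered on the next iteration.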
                        data.variants[i--] = data.variants.last();
                        data.variants.removeLast();
                        changed = true;
                    }
                }
                
                if (data.variants.size() != 1)
                    break;
                
                emitGetByOffset(
                    indexInBlock, node, baseValue, data.variants[0], data.identifierNumber);
                changed = true;
                break;
            }
                
            case MultiPutByOffset: {
                Edge baseEdge = node->child1();
                Node* base = baseEdge.node();
                MultiPutByOffsetData& data = node->multiPutByOffsetData();
                
                AbstractValue baseValue = m_state.forNode(base);

                m_interpreter.execute(indexInBlock); // Push CFA over this node after we get the state before.
                alreadyHandled = true; // Don't allow the default constant folder to do things to this.
                

                for (unsigned i = 0; i < data.variants.size(); ++i) {
                    PutByIdVariant& variant = data.variants[i];
                    variant.oldStructure().filter(baseValue);
                    
                    if (variant.oldStructure().isEmpty()) {
                        data.variants[i--] = data.variants.last();
                        data.variants.removeLast();
                        changed = true;
                        continue;
                    }
                    
                    if (variant.kind() == PutByIdVariant::Transition
                        && variant.oldStructure().onlyStructure() == variant.newStructure()) {
                        variant = PutByIdVariant::replace(
                            variant.oldStructure(),
                            variant.offset());
                        changed = true;
                    }
                }

                if (data.variants.size() != 1)
                    break;
                
                emitPutByOffset(
                    indexInBlock, node, baseValue, data.variants[0], data.identifierNumber);
                changed = true;
                break;
            }
        
            case GetById:
            case GetByIdFlush: {
                Edge childEdge = node->child1();
                Node* child = childEdge.node();
                unsigned identifierNumber = node->identifierNumber();
                
                AbstractValue baseValue = m_state.forNode(child);

                m_interpreter.execute(indexInBlock); // Push CFA over this node after we get the state before.
                alreadyHandled = true; // Don't allow the default constant folder to do things to this.

                if (baseValue.m_structure.isTop() || baseValue.m_structure.isClobbered()
                    || node->child1().useKind() == UntypedUse
                    || (baseValue.m_type & ~SpecCell))
                    break;
                
                GetByIdStatus status = GetByIdStatus::computeFor(
                    vm(), baseValue.m_structure.set(), m_graph.identifiers()[identifierNumber]);
                if (!status.isSimple())
                    break;
                
                bool hasPrototypeChecks = false;
                for (unsigned i = status.numVariants(); i--;) {
                    if (!status[i].constantChecks().isEmpty()
                        || status[i].alternateBase()) {
                        // FIXME: We could handle prototype cases.
                        // https://bugs.webkit.org/show_bug.cgi?id=110386
                        hasPrototypeChecks = true;
                        break;
                    }
                }
                if (hasPrototypeChecks)
                    break;
                
                if (status.numVariants() == 1) {
                    emitGetByOffset(indexInBlock, node, baseValue, status[0], identifierNumber);
                    changed = true;
                    break;
                }
                
                if (!isFTL(m_graph.m_plan.mode))
                    break;
                
                MultiGetByOffsetData* data = m_graph.m_multiGetByOffsetData.add();
                data->variants = status.variants();
                data->identifierNumber = identifierNumber;
                node->convertToMultiGetByOffset(data);
                changed = true;
                break;
            }
                
            case PutById:
            case PutByIdDirect:
            case PutByIdFlush: {
                NodeOrigin origin = node->origin;
                Edge childEdge = node->child1();
                Node* child = childEdge.node();
                unsigned identifierNumber = node->identifierNumber();
                
                ASSERT(childEdge.useKind() == CellUse);
                
                AbstractValue baseValue = m_state.forNode(child);

                m_interpreter.execute(indexInBlock); // Push CFA over this node after we get the state before.
                alreadyHandled = true; // Don't allow the default constant folder to do things to this.

                if (baseValue.m_structure.isTop() || baseValue.m_structure.isClobbered())
                    break;
                
                PutByIdStatus status = PutByIdStatus::computeFor(
                    vm(),
                    m_graph.globalObjectFor(origin.semantic),
                    baseValue.m_structure.set(),
                    m_graph.identifiers()[identifierNumber],
                    node->op() == PutByIdDirect);
                
                if (!status.isSimple())
                    break;
                
                ASSERT(status.numVariants());
                
                if (status.numVariants() > 1 && !isFTL(m_graph.m_plan.mode))
                    break;
                
                changed = true;
                
                for (unsigned i = status.numVariants(); i--;)
                    addChecks(origin, indexInBlock, status[i].constantChecks());
                
                if (status.numVariants() == 1) {
                    emitPutByOffset(indexInBlock, node, baseValue, status[0], identifierNumber);
                    break;
                }
                
                ASSERT(isFTL(m_graph.m_plan.mode));

                MultiPutByOffsetData* data = m_graph.m_multiPutByOffsetData.add();
                data->variants = status.variants();
                data->identifierNumber = identifierNumber;
                node->convertToMultiPutByOffset(data);
                break;
            }

            case ToPrimitive: {
                if (m_state.forNode(node->child1()).m_type & ~(SpecFullNumber | SpecBoolean | SpecString))
                    break;
                
                node->convertToIdentity();
                changed = true;
                break;
            }
                
            case GetMyArgumentByVal: {
                InlineCallFrame* inlineCallFrame = node->origin.semantic.inlineCallFrame;
                JSValue value = m_state.forNode(node->child1()).m_value;
                if (inlineCallFrame && value && value.isInt32()) {
                    int32_t index = value.asInt32();
                    if (index >= 0
                        && static_cast<size_t>(index + 1) < inlineCallFrame->arguments.size()) {
                        // Roll the interpreter over this.
                        m_interpreter.execute(indexInBlock);
                        eliminated = true;
                        
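                        // Map the constant index to the inlined frame's stack
                        // slot: the frame's stack offset plus the argument's
                        // (capture-adjusted) index in the baseline code block.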
                        int operand =
                            inlineCallFrame->stackOffset +
                            m_graph.baselineCodeBlockFor(inlineCallFrame)->argumentIndexAfterCapture(index);
                        
                        m_insertionSet.insertNode(
                            indexInBlock, SpecNone, CheckArgumentsNotCreated, node->origin);
                        m_insertionSet.insertNode(
                            indexInBlock, SpecNone, Phantom, node->origin, node->children);
                        
                        node->convertToGetLocalUnlinked(VirtualRegister(operand));
                        break;
                    }
                }
                
                break;
            }
                
            case Check: {
                alreadyHandled = true;
                m_interpreter.execute(indexInBlock);
                for (unsigned i = 0; i < AdjacencyList::Size; ++i) {
                    Edge edge = node->children.child(i);
                    if (!edge)
                        break;
                    if (edge.isProved() || edge.willNotHaveCheck()) {
                        node->children.removeEdge(i--);
                        changed = true;
                    }
                }
                break;
            }

            default:
                break;
            }
            
            if (eliminated) {
                changed = true;
                continue;
            }
                
            if (alreadyHandled)
                continue;
            
            m_interpreter.execute(indexInBlock);
            if (!m_state.isValid()) {
                // If we invalidated then we shouldn't attempt to constant-fold. Here's an
                // example:
                //
                //     c: JSConstant(4.2)
                //     x: ValueToInt32(Check:Int32:@c)
                //
                // It would be correct for an analysis to assume that execution cannot
                // proceed past @x. Therefore, constant-folding @x could be rather bad. But,
                // the CFA may report that it found a constant even though it also reported
                // that everything has been invalidated. This will only happen in a couple of
                // the constant folding cases; most of them are also separately defensive
                // about such things.
                break;
            }
            if (!node->shouldGenerate() || m_state.didClobber() || node->hasConstant())
                continue;
            
            // Interesting fact: this freezing that we do right here may turn a fragile value into
            // a weak value. See DFGValueStrength.h.
            FrozenValue* value = m_graph.freeze(m_state.forNode(node).value());
            if (!*value)
                continue;
            
            NodeOrigin origin = node->origin;
            AdjacencyList children = node->children;
            
            m_graph.convertToConstant(node, value);
            if (!children.isEmpty()) {
                m_insertionSet.insertNode(
                    indexInBlock, SpecNone, Phantom, origin, children);
            }
            
            changed = true;
        }
        m_state.reset();
        m_insertionSet.execute(block);
        
        return changed;
    }
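A toy sketch of the folding driver above: push an abstract state across each instruction in a block and rewrite an instruction in place once the state proves its result, as convertToConstant() does. ToyNode and ToyState are hypothetical, assuming nothing about the real DFG types:

#include <cstdio>
#include <optional>
#include <vector>

struct ToyNode {
    enum Op { Const, Add, Unknown } op;
    int a = 0, b = 0; // operand indices into the block
    int constant = 0; // payload for Const
};

// The abstract state: the proven constant value of each node, if any.
using ToyState = std::vector<std::optional<int>>;

bool foldConstants(std::vector<ToyNode>& block)
{
    bool changed = false;
    ToyState state(block.size());
    for (size_t i = 0; i < block.size(); ++i) {
        ToyNode& node = block[i];
        switch (node.op) {
        case ToyNode::Const:
            state[i] = node.constant;
            break;
        case ToyNode::Add:
            // Interpret abstractly; once both inputs are proven constants,
            // fold the node to a constant in place.
            if (state[node.a] && state[node.b]) {
                state[i] = *state[node.a] + *state[node.b];
                node = ToyNode { ToyNode::Const, 0, 0, *state[i] };
                changed = true;
            }
            break;
        case ToyNode::Unknown:
            break; // top: nothing proven about this node
        }
    }
    return changed;
}

int main()
{
    std::vector<ToyNode> block = {
        { ToyNode::Const, 0, 0, 2 },
        { ToyNode::Const, 0, 0, 40 },
        { ToyNode::Add, 0, 1 },
        { ToyNode::Unknown },
    };
    bool changed = foldConstants(block);
    std::printf("changed=%d node2=%d\n", changed, block[2].constant); // changed=1 node2=42
    return 0;
}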
Example no. 30
    bool run()
    {
        ASSERT(m_graph.m_form == ThreadedCPS || m_graph.m_form == SSA);
        ASSERT(m_graph.m_unificationState == GloballyUnified);
        ASSERT(m_graph.m_refCountState == EverythingIsLive);
        
        m_count = 0;
        
        if (m_verbose && !shouldDumpGraphAtEachPhase(m_graph.m_plan.mode)) {
            dataLog("Graph before CFA:\n");
            m_graph.dump();
        }
        
        // This implements a pseudo-worklist-based forward CFA, except that the visit order
        // of blocks is the bytecode program order (which is nearly topological), and
        // instead of a worklist we just walk all basic blocks checking if cfaShouldRevisit
        // is set to true. This is likely to balance the efficiency properties of both
        // worklist-based and forward fixpoint-based approaches. Like a worklist-based
        // approach, it won't visit code if it's meaningless to do so (nothing changed at
        // the head of the block or the predecessors have not been visited). Like a forward
        // fixpoint-based approach, it has a high probability of only visiting a block
        // after all predecessors have been visited. Only loops will cause this analysis to
        // revisit blocks, and the amount of revisiting is proportional to loop depth.
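        // (A toy version of this revisit scheme follows this function.)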
        
        m_state.initialize();
        
        do {
            m_changed = false;
            performForwardCFA();
        } while (m_changed);
        
        if (m_graph.m_form != SSA) {
            if (m_verbose)
                dataLog("   Widening state at OSR entry block.\n");
            
            ASSERT(!m_changed);
            
            // Widen the abstract values at the block that serves as the must-handle OSR entry.
            for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
                BasicBlock* block = m_graph.block(blockIndex);
                if (!block)
                    continue;
                
                if (!block->isOSRTarget)
                    continue;
                if (block->bytecodeBegin != m_graph.m_plan.osrEntryBytecodeIndex)
                    continue;
                
                if (m_verbose)
                    dataLog("   Found must-handle block: ", *block, "\n");
                
                bool changed = false;
                for (size_t i = m_graph.m_plan.mustHandleValues.size(); i--;) {
                    int operand = m_graph.m_plan.mustHandleValues.operandForIndex(i);
                    JSValue value = m_graph.m_plan.mustHandleValues[i];
                    Node* node = block->variablesAtHead.operand(operand);
                    if (!node) {
                        if (m_verbose)
                            dataLog("   Not live: ", VirtualRegister(operand), "\n");
                        continue;
                    }
                    
                    if (m_verbose)
                        dataLog("   Widening ", VirtualRegister(operand), " with ", value, "\n");

                    AbstractValue& target = block->valuesAtHead.operand(operand);
                    changed |= target.mergeOSREntryValue(m_graph, value);
                    target.fixTypeForRepresentation(
                        m_graph, resultFor(node->variableAccessData()->flushFormat()));
                }
                
                if (changed || !block->cfaHasVisited) {
                    m_changed = true;
                    block->cfaShouldRevisit = true;
                }
            }

            // Propagate any of the changes we just introduced.
            while (m_changed) {
                m_changed = false;
                performForwardCFA();
            }
            
            // Make sure we record the intersection of all proofs that we ever allowed the
            // compiler to rely upon.
            for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
                BasicBlock* block = m_graph.block(blockIndex);
                if (!block)
                    continue;
                
                block->intersectionOfCFAHasVisited &= block->cfaHasVisited;
                for (unsigned i = block->intersectionOfPastValuesAtHead.size(); i--;)
                    block->intersectionOfPastValuesAtHead[i].filter(block->valuesAtHead[i]);
            }
        }
        
        return true;
    }
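A toy version of the revisit-flag fixpoint described in the comment at the top of run(): blocks are walked in order and only re-processed while their revisit flag is set. ToyBlock and its bitmask "abstract state" are illustrative stand-ins for the real DFG types:

#include <cstdio>
#include <vector>

struct ToyBlock {
    bool shouldRevisit = false;
    unsigned reachableFrom = 0; // toy abstract state: a bitmask of blocks
    std::vector<int> successors;
};

// Process one block: merge its outgoing state into each successor and flag
// the successor for a revisit whenever its state at head grows.
static void visit(std::vector<ToyBlock>& blocks, unsigned index, bool& changed)
{
    ToyBlock& block = blocks[index];
    block.shouldRevisit = false;
    unsigned out = block.reachableFrom | (1u << index); // "execute" the block
    for (int succ : block.successors) {
        unsigned merged = blocks[succ].reachableFrom | out;
        if (merged != blocks[succ].reachableFrom) {
            blocks[succ].reachableFrom = merged;
            blocks[succ].shouldRevisit = true;
            changed = true;
        }
    }
}

int main()
{
    // 0 -> 1 -> 2 with a back edge 2 -> 1; only the loop forces revisits,
    // and the number of passes grows with loop depth.
    std::vector<ToyBlock> blocks(3);
    blocks[0].successors = { 1 };
    blocks[1].successors = { 2 };
    blocks[2].successors = { 1 };
    blocks[0].shouldRevisit = true; // seed the entry block

    bool changed;
    do {
        changed = false;
        for (unsigned i = 0; i < blocks.size(); ++i) {
            if (blocks[i].shouldRevisit)
                visit(blocks, i, changed);
        }
    } while (changed);

    for (unsigned i = 0; i < blocks.size(); ++i)
        std::printf("block %u sees mask %u\n", i, blocks[i].reachableFrom);
    return 0;
}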