Example No. 1
Arguments* StackVisitor::Frame::existingArguments()
{
    if (codeBlock()->codeType() != FunctionCode)
        return 0;
    if (!codeBlock()->usesArguments())
        return 0;
    
    VirtualRegister reg;
        
#if ENABLE(DFG_JIT)
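    // An inlined (DFG) frame records its arguments register on its
    // InlineCallFrame; otherwise we ask the machine CodeBlock directly.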
    if (isInlinedFrame())
        reg = inlineCallFrame()->argumentsRegister;
    else
#endif // ENABLE(DFG_JIT)
        reg = codeBlock()->argumentsRegister();

    // Care must be taken here, since exception fuzzing may raise exceptions in
    // places where they would otherwise be impossible. As a result, the
    // callFrame may not have an activation even though the codeBlock says it
    // needs one. Likewise, even if the codeBlock reports that arguments are
    // used, the unmodifiedArgumentsRegister may not be initialized yet
    // (neither locally nor in the lexicalEnvironment).
    JSValue result = jsUndefined();
    if (codeBlock()->needsActivation() && callFrame()->hasActivation())
        result = callFrame()->lexicalEnvironment()->registerAt(unmodifiedArgumentsRegister(reg).offset()).get();
    if (!result || !result.isCell()) // Fall back to the local unmodifiedArgumentsRegister if the lexicalEnvironment is absent (generally possible) or has not set up its registers yet (only possible when fuzzing exceptions).
        result = callFrame()->r(unmodifiedArgumentsRegister(reg).offset()).jsValue();
    if (!result || !result.isCell()) // Protect against the case where exception fuzzing throws before unmodifiedArgumentsRegister is set up (e.g., in op_enter).
        return 0;
    return jsCast<Arguments*>(result);
}
Example No. 2
CodeOrigin CallFrame::codeOrigin()
{
    if (!codeBlock())
        return CodeOrigin(0);
#if ENABLE(DFG_JIT)
    if (callSiteBitsAreCodeOriginIndex()) {
        CallSiteIndex index = callSiteIndex();
        ASSERT(codeBlock()->canGetCodeOrigin(index));
        return codeBlock()->codeOrigin(index);
    }
#endif
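    // Baseline/LLInt frames encode the bytecode offset directly in the
    // call site bits.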
    return CodeOrigin(callSiteBitsAsBytecodeOffset());
}
Example No. 3
CodeOrigin CallFrame::codeOrigin()
{
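    // Appears to be an earlier revision of the function above: identical
    // logic, using the older "location"-based accessor names.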
    if (!codeBlock())
        return CodeOrigin(0);
#if ENABLE(DFG_JIT)
    if (hasLocationAsCodeOriginIndex()) {
        unsigned index = locationAsCodeOriginIndex();
        ASSERT(codeBlock()->canGetCodeOrigin(index));
        return codeBlock()->codeOrigin(index);
    }
#endif
    return CodeOrigin(locationAsBytecodeOffset());
}
Example No. 4
CodeBlock* CallFrame::someCodeBlockForPossiblyInlinedCode()
{
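    // An inlined frame has no machine CodeBlock of its own, so return the
    // baseline CodeBlock of the inlined executable instead.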
    if (!isInlineCallFrame())
        return codeBlock();
    
    return jsCast<FunctionExecutable*>(inlineCallFrame()->executable.get())->baselineCodeBlockFor(
        inlineCallFrame()->isCall ? CodeForCall : CodeForConstruct);
}
Example No. 5
 bool isWithinPowerOfTwoForConstant(Node* node)
 {
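     // Note: 'power' below is presumably a non-type template parameter
     // (template<int power>) of the enclosing scope; it is not declared in
     // this snippet.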
     JSValue immediateValue = node->valueOfJSConstant(codeBlock());
     if (!immediateValue.isNumber())
         return false;
     double immediate = immediateValue.asNumber();
     return immediate > -(static_cast<int64_t>(1) << power) && immediate < (static_cast<int64_t>(1) << power);
 }
Example No. 6
void JITCompiler::compile()
{
    setStartOfCode();
    compileEntry();
    m_speculative = std::make_unique<SpeculativeJIT>(*this);

    // Plant a check that sufficient space is available in the JSStack.
    addPtr(TrustedImm32(virtualRegisterForLocal(m_graph.requiredRegisterCountForExecutionAndExit() - 1).offset() * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
    Jump stackOverflow = branchPtr(Above, AbsoluteAddress(m_vm->addressOfStackLimit()), GPRInfo::regT1);
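    // regT1 now holds the lowest address the new frame may touch; the check
    // fails when that address dips below the VM's stack limit.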

    addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();
    compileSetupRegistersForEntry();
    compileEntryExecutionFlag();
    compileBody();
    setEndOfMainPath();

    // === Footer code generation ===
    //
    // Generate the stack overflow handling; if the stack check in the entry head fails,
    // we need to call out to a helper function to throw the StackOverflowError.
    stackOverflow.link(this);

    emitStoreCodeOrigin(CodeOrigin(0));

    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);

    m_speculative->callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);

    // Generate slow path code.
    m_speculative->runSlowPathGenerators(m_pcToCodeOriginMapBuilder);
    m_pcToCodeOriginMapBuilder.appendItem(label(), PCToCodeOriginMapBuilder::defaultCodeOrigin());
    
    compileExceptionHandlers();
    linkOSRExits();
    
    // Create OSR entry trampolines if necessary.
    m_speculative->createOSREntries();
    setEndOfCode();

    auto linkBuffer = std::make_unique<LinkBuffer>(*m_vm, *this, m_codeBlock, JITCompilationCanFail);
    if (linkBuffer->didFailToAllocate()) {
        m_graph.m_plan.finalizer = std::make_unique<FailedFinalizer>(m_graph.m_plan);
        return;
    }
    
    link(*linkBuffer);
    m_speculative->linkOSREntries(*linkBuffer);

    m_jitCode->shrinkToFit();
    codeBlock()->shrinkToFit(CodeBlock::LateShrink);

    disassemble(*linkBuffer);
    
    m_graph.m_plan.finalizer = std::make_unique<JITFinalizer>(
        m_graph.m_plan, m_jitCode.release(), WTFMove(linkBuffer));
}
Example No. 7
unsigned CallFrame::bytecodeOffset()
{
    if (!codeBlock())
        return 0;
#if ENABLE(DFG_JIT)
    if (callSiteBitsAreCodeOriginIndex()) {
        ASSERT(codeBlock());
        CodeOrigin codeOrigin = this->codeOrigin();
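        // Walk out of any inlined frames; the offset we want is relative to
        // the machine (outermost) code block.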
        for (InlineCallFrame* inlineCallFrame = codeOrigin.inlineCallFrame; inlineCallFrame;) {
            codeOrigin = inlineCallFrame->directCaller;
            inlineCallFrame = codeOrigin.inlineCallFrame;
        }
        return codeOrigin.bytecodeIndex;
    }
#endif
    ASSERT(callSiteBitsAreBytecodeOffset());
    return callSiteBitsAsBytecodeOffset();
}
Example No. 8
bool CallFrame::callSiteBitsAreBytecodeOffset() const
{
    ASSERT(codeBlock());
    switch (codeBlock()->jitType()) {
    case JITCode::InterpreterThunk:
    case JITCode::BaselineJIT:
        return true;
    case JITCode::None:
    case JITCode::HostCallThunk:
        RELEASE_ASSERT_NOT_REACHED();
        return false;
    default:
        return false;
    }
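    // Not reached: every case above returns. Presumably retained to satisfy
    // compilers that cannot prove the switch is exhaustive.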

    RELEASE_ASSERT_NOT_REACHED();
    return false;
}
Example No. 9
bool CallFrame::callSiteBitsAreCodeOriginIndex() const
{
    ASSERT(codeBlock());
    switch (codeBlock()->jitType()) {
    case JITCode::DFGJIT:
    case JITCode::FTLJIT:
        return true;
    case JITCode::None:
    case JITCode::HostCallThunk:
        RELEASE_ASSERT_NOT_REACHED();
        return false;
    default:
        return false;
    }

    RELEASE_ASSERT_NOT_REACHED();
    return false;
}
Example No. 10
    bool run()
    {
        ASSERT(m_graph.m_form == ThreadedCPS);
        ASSERT(m_graph.m_unificationState == GloballyUnified);
        
        ASSERT(codeBlock()->numParameters() >= 1);
        {
            ConcurrentJSLocker locker(profiledBlock()->m_lock);
            
            // We only do this for the arguments at the first block. The arguments from
            // other entrypoints have already been populated with their predictions.
            auto& arguments = m_graph.m_rootToArguments.find(m_graph.block(0))->value;

            for (size_t arg = 0; arg < static_cast<size_t>(codeBlock()->numParameters()); ++arg) {
                ValueProfile& profile = profiledBlock()->valueProfileForArgument(arg);
                arguments[arg]->variableAccessData()->predict(
                    profile.computeUpdatedPrediction(locker));
            }
        }
        
        for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
            BasicBlock* block = m_graph.block(blockIndex);
            if (!block)
                continue;
            if (!block->isOSRTarget)
                continue;
            if (block->bytecodeBegin != m_graph.m_plan.osrEntryBytecodeIndex())
                continue;
            const Operands<Optional<JSValue>>& mustHandleValues = m_graph.m_plan.mustHandleValues();
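            // Each must-handle value was captured at the OSR entry site; widen
            // the matching variable's prediction with its speculation type.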
            for (size_t i = 0; i < mustHandleValues.size(); ++i) {
                int operand = mustHandleValues.operandForIndex(i);
                Optional<JSValue> value = mustHandleValues[i];
                if (!value)
                    continue;
                Node* node = block->variablesAtHead.operand(operand);
                if (!node)
                    continue;
                ASSERT(node->accessesStack(m_graph));
                node->variableAccessData()->predict(speculationFromValue(value.value()));
            }
        }
        
        return true;
    }
Example No. 11
bool CallFrame::isInlineCallFrameSlow()
{
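    // A frame is inlined when its callee's executable differs from the
    // executable that owns the machine CodeBlock.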
    if (!callee())
        return false;
    JSCell* calleeAsFunctionCell = getJSFunction(callee());
    if (!calleeAsFunctionCell)
        return false;
    JSFunction* calleeAsFunction = asFunction(calleeAsFunctionCell);
    return calleeAsFunction->executable() != codeBlock()->ownerExecutable();
}
Example No. 12
unsigned CallFrame::bytecodeOffset()
{
    if (!codeBlock())
        return 0;
#if ENABLE(DFG_JIT)
    if (hasLocationAsCodeOriginIndex())
        return bytecodeOffsetFromCodeOriginIndex();
#endif
    return locationAsBytecodeOffset();
}
Example No. 13
    bool run()
    {
        ASSERT(m_graph.m_form == ThreadedCPS);
        ASSERT(m_graph.m_unificationState == GloballyUnified);
        
        ASSERT(codeBlock()->numParameters() >= 1);
        for (size_t arg = 0; arg < static_cast<size_t>(codeBlock()->numParameters()); ++arg) {
            ValueProfile* profile = profiledBlock()->valueProfileForArgument(arg);
            if (!profile)
                continue;
            
            m_graph.m_arguments[arg]->variableAccessData()->predict(profile->computeUpdatedPrediction());
            
#if DFG_ENABLE(DEBUG_VERBOSE)
            dataLog(
                "Argument [", arg, "] prediction: ",
                SpeculationDump(m_graph.m_arguments[arg]->variableAccessData()->prediction()), "\n");
#endif
        }
        
        for (BlockIndex blockIndex = 0; blockIndex < m_graph.m_blocks.size(); ++blockIndex) {
            BasicBlock* block = m_graph.m_blocks[blockIndex].get();
            if (!block)
                continue;
            if (!block->isOSRTarget)
                continue;
            if (block->bytecodeBegin != m_graph.m_osrEntryBytecodeIndex)
                continue;
            for (size_t i = 0; i < m_graph.m_mustHandleValues.size(); ++i) {
                Node* node = block->variablesAtHead.operand(
                    m_graph.m_mustHandleValues.operandForIndex(i));
                if (!node)
                    continue;
                ASSERT(node->hasLocal());
                node->variableAccessData()->predict(
                    speculationFromValue(m_graph.m_mustHandleValues[i]));
            }
        }
        
        return true;
    }
Example No. 14
StackVisitor::Frame::CodeType StackVisitor::Frame::codeType() const
{
    if (isWasmFrame())
        return CodeType::Wasm;

    if (!codeBlock())
        return CodeType::Native;

    switch (codeBlock()->codeType()) {
    case EvalCode:
        return CodeType::Eval;
    case ModuleCode:
        return CodeType::Module;
    case FunctionCode:
        return CodeType::Function;
    case GlobalCode:
        return CodeType::Global;
    }
    RELEASE_ASSERT_NOT_REACHED();
    return CodeType::Global;
}
Example No. 15
Arguments* StackVisitor::Frame::existingArguments()
{
    if (codeBlock()->codeType() != FunctionCode)
        return 0;
    if (!codeBlock()->usesArguments())
        return 0;
    
    VirtualRegister reg;
        
#if ENABLE(DFG_JIT)
    if (isInlinedFrame())
        reg = inlineCallFrame()->argumentsRegister;
    else
#endif // ENABLE(DFG_JIT)
        reg = codeBlock()->argumentsRegister();
    
    JSValue result = callFrame()->r(unmodifiedArgumentsRegister(reg).offset()).jsValue();
    if (!result || !result.isCell()) // Protect against Undefined in case we throw in op_enter.
        return 0;
    return jsCast<Arguments*>(result);
}
Example No. 16
 bool run()
 {
     ASSERT(m_graph.m_form == ThreadedCPS);
     ASSERT(m_graph.m_unificationState == GloballyUnified);
     
     ASSERT(codeBlock()->numParameters() >= 1);
     {
         ConcurrentJITLocker locker(profiledBlock()->m_lock);
         
         for (size_t arg = 0; arg < static_cast<size_t>(codeBlock()->numParameters()); ++arg) {
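          // Seed this argument's prediction from its baseline value profile.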
             ValueProfile* profile = profiledBlock()->valueProfileForArgument(arg);
             if (!profile)
                 continue;
         
             m_graph.m_arguments[arg]->variableAccessData()->predict(
                 profile->computeUpdatedPrediction(locker));
         }
     }
     
     for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
         BasicBlock* block = m_graph.block(blockIndex);
         if (!block)
             continue;
         if (!block->isOSRTarget)
             continue;
         if (block->bytecodeBegin != m_graph.m_plan.osrEntryBytecodeIndex)
             continue;
         for (size_t i = 0; i < m_graph.m_plan.mustHandleValues.size(); ++i) {
             int operand = m_graph.m_plan.mustHandleValues.operandForIndex(i);
             Node* node = block->variablesAtHead.operand(operand);
             if (!node)
                 continue;
             ASSERT(node->hasLocal(m_graph));
             node->variableAccessData()->predict(
                 speculationFromValue(m_graph.m_plan.mustHandleValues[i]));
         }
     }
     
     return true;
 }
Example No. 17
StackIterator::Frame::CodeType StackIterator::Frame::codeType() const
{
    if (!isJSFrame())
        return StackIterator::Frame::Native;

    switch (codeBlock()->codeType()) {
    case EvalCode:
        return StackIterator::Frame::Eval;
    case FunctionCode:
        return StackIterator::Frame::Function;
    case GlobalCode:
        return StackIterator::Frame::Global;
    }
    RELEASE_ASSERT_NOT_REACHED();
    return StackIterator::Frame::Global;
}
Example No. 18
String StackVisitor::Frame::sourceURL()
{
    String traceLine;

    switch (codeType()) {
    case CodeType::Eval:
    case CodeType::Function:
    case CodeType::Global: {
        String sourceURL = codeBlock()->ownerExecutable()->sourceURL();
        if (!sourceURL.isEmpty())
            traceLine = sourceURL.impl();
        break;
    }
    case CodeType::Native:
        traceLine = ASCIILiteral("[native code]");
        break;
    }
    return traceLine.isNull() ? emptyString() : traceLine;
}
Example No. 19
void JITCompiler::link()
{
    OwnPtr<LinkBuffer> linkBuffer = adoptPtr(new LinkBuffer(*m_vm, this, m_codeBlock, JITCompilationCanFail));
    if (linkBuffer->didFailToAllocate()) {
        m_graph.m_plan.finalizer = adoptPtr(new FailedFinalizer(m_graph.m_plan));
        return;
    }
    
    link(*linkBuffer);
    m_speculative->linkOSREntries(*linkBuffer);
    
    m_jitCode->shrinkToFit();
    codeBlock()->shrinkToFit(CodeBlock::LateShrink);

    disassemble(*linkBuffer);
    
    m_graph.m_plan.finalizer = adoptPtr(new JITFinalizer(
        m_graph.m_plan, m_jitCode.release(), linkBuffer.release()));
}
Example No. 20
void JITCompiler::linkFunction()
{
    // === Link ===
    OwnPtr<LinkBuffer> linkBuffer = adoptPtr(new LinkBuffer(*m_vm, this, m_codeBlock, JITCompilationCanFail));
    if (linkBuffer->didFailToAllocate()) {
        m_graph.m_plan.finalizer = adoptPtr(new FailedFinalizer(m_graph.m_plan));
        return;
    }
    link(*linkBuffer);
    m_speculative->linkOSREntries(*linkBuffer);
    
    m_jitCode->shrinkToFit();
    codeBlock()->shrinkToFit(CodeBlock::LateShrink);
    
    linkBuffer->link(m_callArityFixup, FunctionPtr((m_vm->getCTIStub(arityFixup)).code().executableAddress()));
    
    disassemble(*linkBuffer);
    
    m_graph.m_plan.finalizer = adoptPtr(new JITFinalizer(
        m_graph.m_plan, m_jitCode.release(), linkBuffer.release(), m_arityCheck));
}
Example No. 21
 bool isWithinPowerOfTwo(Node* node)
 {
     switch (node->op()) {
     case JSConstant: {
         return isWithinPowerOfTwoForConstant<power>(node);
     }
         
     case BitAnd: {
         if (power > 31)
             return true;
         
         return isWithinPowerOfTwoNonRecursive<power>(node->child1().node())
             || isWithinPowerOfTwoNonRecursive<power>(node->child2().node());
     }
         
     case BitOr:
     case BitXor:
     case BitLShift:
     case ValueToInt32: {
         return power > 31;
     }
         
     case BitRShift:
     case BitURShift: {
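         // A known shift amount greater than (32 - power) bounds a 32-bit
         // value well within +/- 2^power.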
         if (power > 31)
             return true;
         
         Node* shiftAmount = node->child2().node();
         if (shiftAmount->op() != JSConstant)
             return false;
         JSValue immediateValue = shiftAmount->valueOfJSConstant(codeBlock());
         if (!immediateValue.isInt32())
             return false;
         return immediateValue.asInt32() > 32 - power;
     }
         
     default:
         return false;
     }
 }
Example No. 22
String StackVisitor::Frame::sourceURL() const
{
    String traceLine;

    switch (codeType()) {
    case CodeType::Eval:
    case CodeType::Module:
    case CodeType::Function:
    case CodeType::Global: {
        String sourceURL = codeBlock()->ownerExecutable()->sourceURL();
        if (!sourceURL.isEmpty())
            traceLine = sourceURL.impl();
        break;
    }
    case CodeType::Native:
        traceLine = "[native code]"_s;
        break;
    case CodeType::Wasm:
        traceLine = "[wasm code]"_s;
        break;
    }
    return traceLine.isNull() ? emptyString() : traceLine;
}
Example No. 23
void JITCompiler::linkFunction()
{
    // === Link ===
    OwnPtr<LinkBuffer> linkBuffer = adoptPtr(new LinkBuffer(*m_vm, this, m_codeBlock, JITCompilationCanFail));
    if (linkBuffer->didFailToAllocate()) {
        m_graph.m_plan.finalizer = adoptPtr(new FailedFinalizer(m_graph.m_plan));
        return;
    }
    link(*linkBuffer);
    m_speculative->linkOSREntries(*linkBuffer);
    
    m_jitCode->shrinkToFit();
    codeBlock()->shrinkToFit(CodeBlock::LateShrink);
    
    // FIXME: switch the stack check & arity check over to DFGOperation-style calls, not JIT stubs.
    linkBuffer->link(m_callStackCheck, cti_stack_check);
    linkBuffer->link(m_callArityCheck, m_codeBlock->m_isConstructor ? cti_op_construct_arityCheck : cti_op_call_arityCheck);
    linkBuffer->link(m_callArityFixup, FunctionPtr((m_vm->getCTIStub(arityFixup)).code().executableAddress()));
    
    disassemble(*linkBuffer);
    
    m_graph.m_plan.finalizer = adoptPtr(new JITFinalizer(
        m_graph.m_plan, m_jitCode.release(), linkBuffer.release(), m_arityCheck));
}
Example No. 24
    void fixupNode(Node& node)
    {
        if (!node.shouldGenerate())
            return;
        
        NodeType op = node.op();

#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
        dataLog("   %s @%u: ", Graph::opName(op), m_compileIndex);
#endif
        
        switch (op) {
        case GetById: {
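            // Turn GetById of 'length' into a specialized length-fetch node
            // when the base's predicted type supports it.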
            if (!isInt32Speculation(m_graph[m_compileIndex].prediction()))
                break;
            if (codeBlock()->identifier(node.identifierNumber()) != globalData().propertyNames->length)
                break;
            bool isArray = isArraySpeculation(m_graph[node.child1()].prediction());
            bool isArguments = isArgumentsSpeculation(m_graph[node.child1()].prediction());
            bool isString = isStringSpeculation(m_graph[node.child1()].prediction());
            bool isInt8Array = m_graph[node.child1()].shouldSpeculateInt8Array();
            bool isInt16Array = m_graph[node.child1()].shouldSpeculateInt16Array();
            bool isInt32Array = m_graph[node.child1()].shouldSpeculateInt32Array();
            bool isUint8Array = m_graph[node.child1()].shouldSpeculateUint8Array();
            bool isUint8ClampedArray = m_graph[node.child1()].shouldSpeculateUint8ClampedArray();
            bool isUint16Array = m_graph[node.child1()].shouldSpeculateUint16Array();
            bool isUint32Array = m_graph[node.child1()].shouldSpeculateUint32Array();
            bool isFloat32Array = m_graph[node.child1()].shouldSpeculateFloat32Array();
            bool isFloat64Array = m_graph[node.child1()].shouldSpeculateFloat64Array();
            if (!isArray && !isArguments && !isString && !isInt8Array && !isInt16Array && !isInt32Array && !isUint8Array && !isUint8ClampedArray && !isUint16Array && !isUint32Array && !isFloat32Array && !isFloat64Array)
                break;
            
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
            dataLog("  @%u -> %s", m_compileIndex, isArray ? "GetArrayLength" : "GetStringLength");
#endif
            if (isArray) {
                node.setOp(GetArrayLength);
                ASSERT(node.flags() & NodeMustGenerate);
                node.clearFlags(NodeMustGenerate);
                m_graph.deref(m_compileIndex);
                
                ArrayProfile* arrayProfile = 
                    m_graph.baselineCodeBlockFor(node.codeOrigin)->getArrayProfile(
                        node.codeOrigin.bytecodeIndex);
                if (!arrayProfile)
                    break;
                arrayProfile->computeUpdatedPrediction();
                if (!arrayProfile->hasDefiniteStructure())
                    break;
                m_graph.ref(node.child1());
                Node checkStructure(CheckStructure, node.codeOrigin, OpInfo(m_graph.addStructureSet(arrayProfile->expectedStructure())), node.child1().index());
                checkStructure.ref();
                NodeIndex checkStructureIndex = m_graph.size();
                m_graph.append(checkStructure);
                m_insertionSet.append(m_indexInBlock, checkStructureIndex);
                break;
            }
            if (isArguments)
                node.setOp(GetArgumentsLength);
            else if (isString)
                node.setOp(GetStringLength);
            else if (isInt8Array)
                node.setOp(GetInt8ArrayLength);
            else if (isInt16Array)
                node.setOp(GetInt16ArrayLength);
            else if (isInt32Array)
                node.setOp(GetInt32ArrayLength);
            else if (isUint8Array)
                node.setOp(GetUint8ArrayLength);
            else if (isUint8ClampedArray)
                node.setOp(GetUint8ClampedArrayLength);
            else if (isUint16Array)
                node.setOp(GetUint16ArrayLength);
            else if (isUint32Array)
                node.setOp(GetUint32ArrayLength);
            else if (isFloat32Array)
                node.setOp(GetFloat32ArrayLength);
            else if (isFloat64Array)
                node.setOp(GetFloat64ArrayLength);
            else
                ASSERT_NOT_REACHED();
            // No longer MustGenerate
            ASSERT(node.flags() & NodeMustGenerate);
            node.clearFlags(NodeMustGenerate);
            m_graph.deref(m_compileIndex);
            break;
        }
        case GetIndexedPropertyStorage: {
            if (!m_graph[node.child1()].prediction()
                || !m_graph[node.child2()].shouldSpeculateInteger()
                || m_graph[node.child1()].shouldSpeculateArguments()) {
                node.setOpAndDefaultFlags(Nop);
                m_graph.clearAndDerefChild1(node);
                m_graph.clearAndDerefChild2(node);
                m_graph.clearAndDerefChild3(node);
                node.setRefCount(0);
            }
            break;
        }
        case GetByVal:
        case StringCharAt:
        case StringCharCodeAt: {
            if (!!node.child3() && m_graph[node.child3()].op() == Nop)
                node.children.child3() = Edge();
            break;
        }
            
        case ValueToInt32: {
            if (m_graph[node.child1()].shouldSpeculateNumber()
                && node.mustGenerate()) {
                node.clearFlags(NodeMustGenerate);
                m_graph.deref(m_compileIndex);
            }
            break;
        }
            
        case BitAnd:
        case BitOr:
        case BitXor:
        case BitRShift:
        case BitLShift:
        case BitURShift: {
            fixIntEdge(node.children.child1());
            fixIntEdge(node.children.child2());
            break;
        }
            
        case CompareEq:
        case CompareLess:
        case CompareLessEq:
        case CompareGreater:
        case CompareGreaterEq:
        case CompareStrictEq: {
            if (Node::shouldSpeculateInteger(m_graph[node.child1()], m_graph[node.child2()]))
                break;
            if (!Node::shouldSpeculateNumber(m_graph[node.child1()], m_graph[node.child2()]))
                break;
            fixDoubleEdge(0);
            fixDoubleEdge(1);
            break;
        }
            
        case LogicalNot: {
            if (m_graph[node.child1()].shouldSpeculateInteger())
                break;
            if (!m_graph[node.child1()].shouldSpeculateNumber())
                break;
            fixDoubleEdge(0);
            break;
        }
            
        case Branch: {
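            // Fold Branch(LogicalNot(x)) into Branch(x) with the taken and
            // not-taken targets swapped, provided the LogicalNot has no other
            // users and x produces a boolean.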
            if (!m_graph[node.child1()].shouldSpeculateInteger()
                && m_graph[node.child1()].shouldSpeculateNumber())
                fixDoubleEdge(0);

            Node& myNode = m_graph[m_compileIndex]; // reload because the graph may have changed
            Edge logicalNotEdge = myNode.child1();
            Node& logicalNot = m_graph[logicalNotEdge];
            if (logicalNot.op() == LogicalNot
                && logicalNot.adjustedRefCount() == 1) {
                Edge newChildEdge = logicalNot.child1();
                if (m_graph[newChildEdge].hasBooleanResult()) {
                    m_graph.ref(newChildEdge);
                    m_graph.deref(logicalNotEdge);
                    myNode.children.setChild1(newChildEdge);
                    
                    BlockIndex toBeTaken = myNode.notTakenBlockIndex();
                    BlockIndex toBeNotTaken = myNode.takenBlockIndex();
                    myNode.setTakenBlockIndex(toBeTaken);
                    myNode.setNotTakenBlockIndex(toBeNotTaken);
                }
            }
            break;
        }
            
        case SetLocal: {
            if (node.variableAccessData()->isCaptured())
                break;
            if (!node.variableAccessData()->shouldUseDoubleFormat())
                break;
            fixDoubleEdge(0);
            break;
        }
            
        case ArithAdd:
        case ValueAdd: {
            if (m_graph.addShouldSpeculateInteger(node))
                break;
            if (!Node::shouldSpeculateNumber(m_graph[node.child1()], m_graph[node.child2()]))
                break;
            fixDoubleEdge(0);
            fixDoubleEdge(1);
            break;
        }
            
        case ArithSub: {
            if (m_graph.addShouldSpeculateInteger(node)
                && node.canSpeculateInteger())
                break;
            fixDoubleEdge(0);
            fixDoubleEdge(1);
            break;
        }
            
        case ArithNegate: {
            if (m_graph.negateShouldSpeculateInteger(node))
                break;
            fixDoubleEdge(0);
            break;
        }
            
        case ArithMin:
        case ArithMax:
        case ArithMod: {
            if (Node::shouldSpeculateInteger(m_graph[node.child1()], m_graph[node.child2()])
                && node.canSpeculateInteger())
                break;
            fixDoubleEdge(0);
            fixDoubleEdge(1);
            break;
        }
            
        case ArithMul: {
            if (m_graph.mulShouldSpeculateInteger(node))
                break;
            fixDoubleEdge(0);
            fixDoubleEdge(1);
            break;
        }

        case ArithDiv: {
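            // Integer division is cheap on x86; elsewhere, perform the
            // division in double and convert back via DoubleAsInt32.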
            if (Node::shouldSpeculateInteger(m_graph[node.child1()], m_graph[node.child2()])
                && node.canSpeculateInteger()) {
                if (isX86())
                    break;
                fixDoubleEdge(0);
                fixDoubleEdge(1);
                
                Node& oldDivision = m_graph[m_compileIndex];
                
                Node newDivision = oldDivision;
                newDivision.setRefCount(2);
                newDivision.predict(SpecDouble);
                NodeIndex newDivisionIndex = m_graph.size();
                
                oldDivision.setOp(DoubleAsInt32);
                oldDivision.children.initialize(Edge(newDivisionIndex, DoubleUse), Edge(), Edge());
                
                m_graph.append(newDivision);
                m_insertionSet.append(m_indexInBlock, newDivisionIndex);
                
                break;
            }
            fixDoubleEdge(0);
            fixDoubleEdge(1);
            break;
        }
            
        case ArithAbs: {
            if (m_graph[node.child1()].shouldSpeculateInteger()
                && node.canSpeculateInteger())
                break;
            fixDoubleEdge(0);
            break;
        }
            
        case ArithSqrt: {
            fixDoubleEdge(0);
            break;
        }
            
        case PutByVal:
        case PutByValSafe: {
            Edge child1 = m_graph.varArgChild(node, 0);
            Edge child2 = m_graph.varArgChild(node, 1);
            Edge child3 = m_graph.varArgChild(node, 2);
            if (!m_graph[child1].prediction() || !m_graph[child2].prediction())
                break;
            if (!m_graph[child2].shouldSpeculateInteger())
                break;
            if (isActionableIntMutableArraySpeculation(m_graph[child1].prediction())) {
                if (m_graph[child3].isConstant())
                    break;
                if (m_graph[child3].shouldSpeculateInteger())
                    break;
                fixDoubleEdge(2);
                break;
            }
            if (isActionableFloatMutableArraySpeculation(m_graph[child1].prediction())) {
                fixDoubleEdge(2);
                break;
            }
            break;
        }
            
        default:
            break;
        }

#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
        if (!(node.flags() & NodeHasVarArgs)) {
            dataLog("new children: ");
            node.dumpChildren(WTF::dataFile());
        }
        dataLog("\n");
#endif
    }
Example No. 25
void CallFrame::setCurrentVPC(Instruction* vpc)
{
    setBytecodeOffsetForNonDFGCode(vpc - codeBlock()->instructions().begin());
}
Example No. 26
Instruction* CallFrame::currentVPC() const
{
    return codeBlock()->instructions().begin() + bytecodeOffsetForNonDFGCode();
}
Example No. 27
void CallFrame::setBytecodeOffsetForNonDFGCode(unsigned offset)
{
    ASSERT(codeBlock());
    setCurrentVPC(codeBlock()->instructions().begin() + offset);
}
Example No. 28
unsigned CallFrame::bytecodeOffsetForNonDFGCode() const
{
    ASSERT(codeBlock());
    return currentVPC() - codeBlock()->instructions().begin();
}
Example No. 29
CallFrame* CallFrame::trueCallFrame(AbstractPC pc)
{
    // Am I an inline call frame? If so, we're done.
    if (isInlineCallFrame())
        return this;
    
    // If I don't have a code block, then I'm not DFG code, so I'm the true call frame.
    CodeBlock* machineCodeBlock = codeBlock();
    if (!machineCodeBlock)
        return this;
    
    // If the code block does not have any code origins, then there was no inlining, so
    // I'm done.
    if (!machineCodeBlock->hasCodeOrigins())
        return this;
    
    // At this point the PC must be due either to the DFG, or it must be unset.
    ASSERT(pc.hasJITReturnAddress() || !pc);
    
    // Try to determine the CodeOrigin. If we don't have a pc set then the only way
    // that this makes sense is if the CodeOrigin index was set in the call frame.
    // FIXME: Note that you will see "Not currently in inlined code" comments below.
    // Currently, we do not record code origins for code that is not inlined, because
    // the only thing that we use code origins for is determining the inline stack.
    // But in the future, we'll want to use this same functionality (having a code
    // origin mapping for any calls out of JIT code) to determine the PC at any point
    // in the stack even if not in inlined code. When that happens, the code below
    // will have to change the way it detects the presence of inlining: it will always
    // get a code origin, but sometimes, that code origin will not have an inline call
    // frame. In that case, this method should bail and return this.
    CodeOrigin codeOrigin;
    if (pc.isSet()) {
        ReturnAddressPtr currentReturnPC = pc.jitReturnAddress();
        
        bool hasCodeOrigin = machineCodeBlock->codeOriginForReturn(currentReturnPC, codeOrigin);
        ASSERT_UNUSED(hasCodeOrigin, hasCodeOrigin);
    } else {
        unsigned index = codeOriginIndexForDFG();
        codeOrigin = machineCodeBlock->codeOrigin(index);
    }

    if (!codeOrigin.inlineCallFrame)
        return this; // Not currently in inlined code.
    
    for (InlineCallFrame* inlineCallFrame = codeOrigin.inlineCallFrame; inlineCallFrame;) {
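        // Reify a real CallFrame for this inlined frame so that stack walkers
        // can traverse it like any other frame.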
        InlineCallFrame* nextInlineCallFrame = inlineCallFrame->caller.inlineCallFrame;
        
        CallFrame* inlinedCaller = this + inlineCallFrame->stackOffset;
        
        JSFunction* calleeAsFunction = inlineCallFrame->callee.get();
        
        // Fill in the inlinedCaller
        inlinedCaller->setCodeBlock(machineCodeBlock);
        
        inlinedCaller->setScopeChain(calleeAsFunction->scope());
        if (nextInlineCallFrame)
            inlinedCaller->setCallerFrame(this + nextInlineCallFrame->stackOffset);
        else
            inlinedCaller->setCallerFrame(this);
        
        inlinedCaller->setInlineCallFrame(inlineCallFrame);
        inlinedCaller->setArgumentCountIncludingThis(inlineCallFrame->arguments.size());
        inlinedCaller->setCallee(calleeAsFunction);
        
        inlineCallFrame = nextInlineCallFrame;
    }
    
    return this + codeOrigin.inlineCallFrame->stackOffset;
}
Example No. 30
unsigned CallFrame::callSiteBitsAsBytecodeOffset() const
{
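    // Valid only for baseline/LLInt frames, where the call site bits are a
    // plain bytecode offset.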
    ASSERT(codeBlock());
    ASSERT(callSiteBitsAreBytecodeOffset());
    return callSiteIndex().bits();
}