Code Example #1
ExceptionInfo* EvalExecutable::reparseExceptionInfo(AJGlobalData* globalData, ScopeChainNode* scopeChainNode, CodeBlock* codeBlock)
{
    RefPtr<EvalNode> newEvalBody = globalData->parser->parse<EvalNode>(globalData, 0, 0, m_source);

    ScopeChain scopeChain(scopeChainNode);
    AJGlobalObject* globalObject = scopeChain.globalObject();

    OwnPtr<EvalCodeBlock> newCodeBlock(new EvalCodeBlock(this, globalObject, source().provider(), scopeChain.localDepth()));

    OwnPtr<BytecodeGenerator> generator(new BytecodeGenerator(newEvalBody.get(), globalObject->debugger(), scopeChain, newCodeBlock->symbolTable(), newCodeBlock.get()));
    generator->setRegeneratingForExceptionInfo(static_cast<EvalCodeBlock*>(codeBlock));
    generator->generate();

    ASSERT(newCodeBlock->instructionCount() == codeBlock->instructionCount());

#if ENABLE(JIT)
#if ENABLE(INTERPRETER)
    if (globalData->canUseJIT())
#endif
    {
        JITCode newJITCode = JIT::compile(globalData, newCodeBlock.get(), generatedJITCode().start());
        ASSERT(newJITCode.size() == generatedJITCode().size());
    }
#endif

    return newCodeBlock->extractExceptionInfo();
}
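
Both this eval path and the function path in the next example hinge on the ASSERT that the regenerated block has the same instruction count as the original: exception info is keyed by bytecode offset, so the two layouts must line up exactly. A minimal illustrative sketch of that offset-keyed lookup (hypothetical helper, not the AJ/JSC API):

#include <vector>

// Hypothetical sketch: handler ranges are recorded as bytecode offsets, so
// they are only meaningful against a block with an identical instruction
// layout -- hence the instructionCount() assertion above.
struct HandlerInfo {
    unsigned start;   // first bytecode offset covered by this handler
    unsigned end;     // one past the last covered offset
    unsigned target;  // bytecode offset of the handler itself
};

const HandlerInfo* handlerForBytecodeOffset(
    const std::vector<HandlerInfo>& handlers, unsigned bytecodeOffset)
{
    for (const HandlerInfo& handler : handlers) {
        if (bytecodeOffset >= handler.start && bytecodeOffset < handler.end)
            return &handler;
    }
    return nullptr;
}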
Code Example #2
ExceptionInfo* FunctionExecutable::reparseExceptionInfo(AJGlobalData* globalData, ScopeChainNode* scopeChainNode, CodeBlock* codeBlock)
{
    RefPtr<FunctionBodyNode> newFunctionBody = globalData->parser->parse<FunctionBodyNode>(globalData, 0, 0, m_source);
    if (m_forceUsesArguments)
        newFunctionBody->setUsesArguments();
    newFunctionBody->finishParsing(m_parameters, m_name);

    ScopeChain scopeChain(scopeChainNode);
    AJGlobalObject* globalObject = scopeChain.globalObject();

    OwnPtr<CodeBlock> newCodeBlock(new FunctionCodeBlock(this, FunctionCode, source().provider(), source().startOffset()));
    globalData->functionCodeBlockBeingReparsed = newCodeBlock.get();

    OwnPtr<BytecodeGenerator> generator(new BytecodeGenerator(newFunctionBody.get(), globalObject->debugger(), scopeChain, newCodeBlock->symbolTable(), newCodeBlock.get()));
    generator->setRegeneratingForExceptionInfo(static_cast<FunctionCodeBlock*>(codeBlock));
    generator->generate();

    ASSERT(newCodeBlock->instructionCount() == codeBlock->instructionCount());

#if ENABLE(JIT)
#if ENABLE(INTERPRETER)
    if (globalData->canUseJIT())
#endif
    {
        JITCode newJITCode = JIT::compile(globalData, newCodeBlock.get(), generatedJITCode().start());
        ASSERT(newJITCode.size() == generatedJITCode().size());
    }
#endif

    globalData->functionCodeBlockBeingReparsed = 0;

    return newCodeBlock->extractExceptionInfo();
}
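
One difference from the eval path: the function path publishes the in-flight block through globalData->functionCodeBlockBeingReparsed and must remember to clear it before returning. A minimal sketch (hypothetical, not part of the original code) of how an RAII guard could make that clear-on-exit automatic:

// Hypothetical scope guard: sets the slot on entry and clears it on every
// exit path, so an early return cannot leave a stale pointer behind.
template<typename T>
class ScopedSlot {
public:
    ScopedSlot(T*& slot, T* value)
        : m_slot(slot)
    {
        m_slot = value;
    }
    ~ScopedSlot() { m_slot = nullptr; }
    ScopedSlot(const ScopedSlot&) = delete;
    ScopedSlot& operator=(const ScopedSlot&) = delete;
private:
    T*& m_slot;
};

// Usage inside reparseExceptionInfo would then be a single line:
//     ScopedSlot<CodeBlock> reparseScope(
//         globalData->functionCodeBlockBeingReparsed, newCodeBlock.get());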
Code Example #3
File: StackIterator.cpp  Project: webOS-ports/webkit
void StackIterator::Frame::print(int indentLevel)
{
    int i = indentLevel;

    if (!this->callFrame()) {
        printif(i, "frame 0x0\n");
        return;
    }

    CodeBlock* codeBlock = this->codeBlock();
    printif(i, "frame %p {\n", this->callFrame());

    CallFrame* callFrame = m_callFrame;
    CallFrame* callerFrame = this->callerFrame();
    void* returnPC = callFrame->hasReturnPC() ? callFrame->returnPC().value() : 0;

    printif(i, "   name '%s'\n", functionName().utf8().data());
    printif(i, "   sourceURL '%s'\n", sourceURL().utf8().data());
    printif(i, "   hostFlag %d\n", callerFrame->hasHostCallFrameFlag());

#if ENABLE(DFG_JIT)
    printif(i, "   isInlinedFrame %d\n", isInlinedFrame());
    if (isInlinedFrame())
        printif(i, "   InlineCallFrame %p\n", m_inlineCallFrame);
#endif

    printif(i, "   callee %p\n", callee());
    printif(i, "   returnPC %p\n", returnPC);
    printif(i, "   callerFrame %p\n", callerFrame->removeHostCallFrameFlag());
    unsigned locationRawBits = callFrame->locationAsRawBits();
    printif(i, "   rawLocationBits %u 0x%x\n", locationRawBits, locationRawBits);
    printif(i, "   codeBlock %p\n", codeBlock);
    if (codeBlock) {
        JITCode::JITType jitType = codeBlock->jitType();
        if (callFrame->hasLocationAsBytecodeOffset()) {
            unsigned bytecodeOffset = callFrame->locationAsBytecodeOffset();
            printif(i, "      bytecodeOffset %u %p / %zu\n", bytecodeOffset, reinterpret_cast<void*>(bytecodeOffset), codeBlock->instructions().size());
#if ENABLE(DFG_JIT)
        } else {
            unsigned codeOriginIndex = callFrame->locationAsCodeOriginIndex();
            printif(i, "      codeOriginIdex %u %p / %zu\n", codeOriginIndex, reinterpret_cast<void*>(codeOriginIndex), codeBlock->codeOrigins().size());
#endif
        }
        unsigned line = 0;
        unsigned column = 0;
        computeLineAndColumn(line, column);
        printif(i, "      line %d\n", line);
        printif(i, "      column %d\n", column);
        printif(i, "      jitType %d <%s> isOptimizingJIT %d\n", jitType, jitTypeName(jitType), JITCode::isOptimizingJIT(jitType));
#if ENABLE(DFG_JIT)
        printif(i, "      hasCodeOrigins %d\n", codeBlock->hasCodeOrigins());
        if (codeBlock->hasCodeOrigins()) {
            JITCode* jitCode = codeBlock->jitCode().get();
            printif(i, "         jitCode %p start %p end %p\n", jitCode, jitCode->start(), jitCode->end());
        }
#endif
    }
    printif(i, "}\n");
}
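
printif is not shown in the example; it appears to be an indentation-aware printf wrapper (the nested fields carry their extra indentation inside the format strings themselves). A minimal sketch under that assumption; the real helper may differ:

#include <cstdarg>
#include <cstdio>

// Assumed shape of printif: emit `indentLevel` units of indentation, then
// a printf-style formatted message.
static void printif(int indentLevel, const char* format, ...)
{
    for (int i = 0; i < indentLevel; ++i)
        fputs("    ", stdout);

    va_list args;
    va_start(args, format);
    vfprintf(stdout, format, args);
    va_end(args);
}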
Code Example #4
char* JIT_OPERATION triggerOSREntryNow(
    ExecState* exec, int32_t bytecodeIndex, int32_t streamIndex)
{
    VM* vm = &exec->vm();
    NativeCallFrameTracer tracer(vm, exec);
    DeferGC deferGC(vm->heap);
    CodeBlock* codeBlock = exec->codeBlock();
    
    if (codeBlock->jitType() != JITCode::DFGJIT) {
        dataLog("Unexpected code block in DFG->FTL tier-up: ", *codeBlock, "\n");
        RELEASE_ASSERT_NOT_REACHED();
    }
    
    JITCode* jitCode = codeBlock->jitCode()->dfg();
    
    if (Options::verboseOSR()) {
        dataLog(
            *codeBlock, ": Entered triggerOSREntryNow with executeCounter = ",
            jitCode->tierUpCounter, "\n");
    }
    
    // - If we don't have an FTL code block, then try to compile one.
    // - If we do have an FTL code block, then try to enter for a while.
    // - If we couldn't enter for a while, then trigger OSR entry.
    
    triggerFTLReplacementCompile(vm, codeBlock, jitCode);

    if (!codeBlock->hasOptimizedReplacement())
        return 0;
    
    if (jitCode->osrEntryRetry < Options::ftlOSREntryRetryThreshold()) {
        jitCode->osrEntryRetry++;
        return 0;
    }
    
    // It's time to try to compile code for OSR entry.
    Worklist::State worklistState;
    if (Worklist* worklist = existingGlobalFTLWorklistOrNull()) {
        worklistState = worklist->completeAllReadyPlansForVM(
            *vm, CompilationKey(codeBlock->baselineVersion(), FTLForOSREntryMode));
    } else
        worklistState = Worklist::NotKnown;
    
    if (worklistState == Worklist::Compiling)
        return 0;
    
    if (CodeBlock* entryBlock = jitCode->osrEntryBlock.get()) {
        void* address = FTL::prepareOSREntry(
            exec, codeBlock, entryBlock, bytecodeIndex, streamIndex);
        if (address)
            return static_cast<char*>(address);
        
        FTL::ForOSREntryJITCode* entryCode = entryBlock->jitCode()->ftlForOSREntry();
        entryCode->countEntryFailure();
        if (entryCode->entryFailureCount() <
            Options::ftlOSREntryFailureCountForReoptimization())
            return 0;
        
        // OSR entry failed. Oh no! This implies that we need to retry. We retry
        // without exponential backoff and we only do this for the entry code block.
        jitCode->osrEntryBlock.clear();
        jitCode->osrEntryRetry = 0;
        return 0;
    }
    
    if (worklistState == Worklist::Compiled) {
        // This means that compilation failed and we already set the thresholds.
        if (Options::verboseOSR())
            dataLog("Code block ", *codeBlock, " was compiled but it doesn't have an optimized replacement.\n");
        return 0;
    }

    // We aren't compiling and haven't compiled anything for OSR entry. So, try to compile
    // something.
    Operands<JSValue> mustHandleValues;
    jitCode->reconstruct(
        exec, codeBlock, CodeOrigin(bytecodeIndex), streamIndex, mustHandleValues);
    RefPtr<CodeBlock> replacementCodeBlock = codeBlock->newReplacement();
    CompilationResult forEntryResult = compile(
        *vm, replacementCodeBlock.get(), codeBlock, FTLForOSREntryMode, bytecodeIndex,
        mustHandleValues, ToFTLForOSREntryDeferredCompilationCallback::create(codeBlock));
    
    if (forEntryResult != CompilationSuccessful) {
        ASSERT(forEntryResult == CompilationDeferred || replacementCodeBlock->hasOneRef());
        return 0;
    }

    // It's possible that the for-entry compile already succeeded. In that case OSR
    // entry will succeed unless we ran out of stack. It's not clear what we should do.
    // We signal to try again after a while if that happens.
    void* address = FTL::prepareOSREntry(
        exec, codeBlock, jitCode->osrEntryBlock.get(), bytecodeIndex, streamIndex);
    return static_cast<char*>(address);
}
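
The two thresholds above (ftlOSREntryRetryThreshold for delaying the entry compile, ftlOSREntryFailureCountForReoptimization for discarding a repeatedly failing entry block) are plain counter gates. A condensed sketch of the pattern, illustrative only and not JSC code:

#include <cstdint>

// Illustrative counter gates mirroring the control flow above.
struct OSREntryGate {
    uint32_t osrEntryRetry = 0;
    uint32_t entryFailureCount = 0;

    // Returns true once enough calls have accumulated to justify spending
    // time on an FTL-for-OSR-entry compile.
    bool shouldTryEntryCompile(uint32_t retryThreshold)
    {
        if (osrEntryRetry < retryThreshold) {
            osrEntryRetry++;
            return false;
        }
        return true;
    }

    // Called after a failed FTL::prepareOSREntry. Once failures pile up,
    // the entry block is cleared and rebuilt; as the comment in the code
    // above notes, the retry is deliberately without exponential backoff.
    bool shouldDiscardEntryBlock(uint32_t failureThreshold)
    {
        entryFailureCount++;
        return entryFailureCount >= failureThreshold;
    }
};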
Code Example #5
File: DFGOSREntry.cpp  Project: LuXiong/webkit
SUPPRESS_ASAN
void* prepareOSREntry(ExecState* exec, CodeBlock* codeBlock, unsigned bytecodeIndex)
{
    ASSERT(JITCode::isOptimizingJIT(codeBlock->jitType()));
    ASSERT(codeBlock->alternative());
    ASSERT(codeBlock->alternative()->jitType() == JITCode::BaselineJIT);
    ASSERT(!codeBlock->jitCodeMap());

    if (!Options::useOSREntryToDFG())
        return 0;

    if (Options::verboseOSR()) {
        dataLog(
            "DFG OSR in ", *codeBlock->alternative(), " -> ", *codeBlock,
            " from bc#", bytecodeIndex, "\n");
    }
    
    VM* vm = &exec->vm();

    sanitizeStackForVM(vm);
    
    if (bytecodeIndex)
        codeBlock->ownerScriptExecutable()->setDidTryToEnterInLoop(true);
    
    if (codeBlock->jitType() != JITCode::DFGJIT) {
        RELEASE_ASSERT(codeBlock->jitType() == JITCode::FTLJIT);
        
        // When will this happen? We could have:
        //
        // - An exit from the FTL JIT into the baseline JIT followed by an attempt
        //   to reenter. We're fine with allowing this to fail. If it happens
        //   enough we'll just reoptimize. It basically means that the OSR exit cost
        //   us dearly and so reoptimizing is the right thing to do.
        //
        // - We have recursive code with hot loops. Consider that foo has a hot loop
        //   that calls itself. We have two foo's on the stack, let's call them foo1
        //   and foo2, with foo1 having called foo2 from foo's hot loop. foo2 gets
        //   optimized all the way into the FTL. Then it returns into foo1, and then
        //   foo1 wants to get optimized. It might reach this conclusion from its
        //   hot loop and attempt to OSR enter. And we'll tell it that it can't. It
        //   might be worth addressing this case, but I just think this case will
        //   be super rare. For now, if it does happen, it'll cause some compilation
        //   thrashing.
        
        if (Options::verboseOSR())
            dataLog("    OSR failed because the target code block is not DFG.\n");
        return 0;
    }
    
    JITCode* jitCode = codeBlock->jitCode()->dfg();
    OSREntryData* entry = jitCode->osrEntryDataForBytecodeIndex(bytecodeIndex);
    
    if (!entry) {
        if (Options::verboseOSR())
            dataLogF("    OSR failed because the entrypoint was optimized out.\n");
        return 0;
    }
    
    ASSERT(entry->m_bytecodeIndex == bytecodeIndex);
    
    // The code below checks if it is safe to perform OSR entry. It may find
    // that it is unsafe to do so, for any number of reasons, which are documented
    // below. If the code decides not to OSR then it returns 0, and it's the caller's
    // responsibility to patch up the state in such a way as to ensure that it's
    // both safe and efficient to continue executing baseline code for now. This
    // should almost certainly include calling either codeBlock->optimizeAfterWarmUp()
    // or codeBlock->dontOptimizeAnytimeSoon().
    
    // 1) Verify predictions. If the predictions are inconsistent with the actual
    //    values, then OSR entry is not possible at this time. It's tempting to
    //    assume that we could somehow avoid this case. We can certainly avoid it
    //    for first-time loop OSR - that is, OSR into a CodeBlock that we have just
    //    compiled. Then we are almost guaranteed that all of the predictions will
    //    check out. It would be pretty easy to make that a hard guarantee. But
    //    then there would still be the case where two call frames with the same
    //    baseline CodeBlock are on the stack at the same time. The top one
    //    triggers compilation and OSR. In that case, we may no longer have
    //    accurate value profiles for the one deeper in the stack. Hence, when we
    //    pop into the CodeBlock that is deeper on the stack, we might OSR and
    //    realize that the predictions are wrong. Probably, in most cases, this is
    //    just an anomaly in the sense that the older CodeBlock simply went off
    //    into a less-likely path. So, the wisest course of action is to simply not
    //    OSR at this time.
    
    for (size_t argument = 0; argument < entry->m_expectedValues.numberOfArguments(); ++argument) {
        if (argument >= exec->argumentCountIncludingThis()) {
            if (Options::verboseOSR()) {
                dataLogF("    OSR failed because argument %zu was not passed, expected ", argument);
                entry->m_expectedValues.argument(argument).dump(WTF::dataFile());
                dataLogF(".\n");
            }
            return 0;
        }
        
        JSValue value;
        if (!argument)
            value = exec->thisValue();
        else
            value = exec->argument(argument - 1);
        
        if (!entry->m_expectedValues.argument(argument).validate(value)) {
            if (Options::verboseOSR()) {
                dataLog(
                    "    OSR failed because argument ", argument, " is ", value,
                    ", expected ", entry->m_expectedValues.argument(argument), ".\n");
            }
            return 0;
        }
    }
    
    for (size_t local = 0; local < entry->m_expectedValues.numberOfLocals(); ++local) {
        int localOffset = virtualRegisterForLocal(local).offset();
        if (entry->m_localsForcedDouble.get(local)) {
            if (!exec->registers()[localOffset].asanUnsafeJSValue().isNumber()) {
                if (Options::verboseOSR()) {
                    dataLog(
                        "    OSR failed because variable ", localOffset, " is ",
                        exec->registers()[localOffset].asanUnsafeJSValue(), ", expected number.\n");
                }
                return 0;
            }
            continue;
        }
        if (entry->m_localsForcedAnyInt.get(local)) {
            if (!exec->registers()[localOffset].asanUnsafeJSValue().isAnyInt()) {
                if (Options::verboseOSR()) {
                    dataLog(
                        "    OSR failed because variable ", localOffset, " is ",
                        exec->registers()[localOffset].asanUnsafeJSValue(), ", expected ",
                        "machine int.\n");
                }
                return 0;
            }
            continue;
        }
        if (!entry->m_expectedValues.local(local).validate(exec->registers()[localOffset].asanUnsafeJSValue())) {
            if (Options::verboseOSR()) {
                dataLog(
                    "    OSR failed because variable ", localOffset, " is ",
                    exec->registers()[localOffset].asanUnsafeJSValue(), ", expected ",
                    entry->m_expectedValues.local(local), ".\n");
            }
            return 0;
        }
    }

    // 2) Check the stack height. The DFG JIT may require a taller stack than the
    //    baseline JIT, in some cases. If we can't grow the stack, then don't do
    //    OSR right now. That's the only option we have unless we want basic block
    //    boundaries to start throwing RangeErrors. Although that would be possible,
    //    it seems silly: you'd be diverting the program to error handling when it
    //    would have otherwise just kept running albeit less quickly.
    
    unsigned frameSizeForCheck = jitCode->common.requiredRegisterCountForExecutionAndExit();
    if (!vm->interpreter->stack().ensureCapacityFor(&exec->registers()[virtualRegisterForLocal(frameSizeForCheck - 1).offset()])) {
        if (Options::verboseOSR())
            dataLogF("    OSR failed because stack growth failed.\n");
        return 0;
    }
    
    if (Options::verboseOSR())
        dataLogF("    OSR should succeed.\n");

    // At this point we're committed to entering. We will do some work to set things up,
    // but we also rely on our caller recognizing that when we return a non-null pointer,
    // that means that we're already past the point of no return and we must succeed at
    // entering.
    
    // 3) Set up the data in the scratch buffer and perform data format conversions.

    unsigned frameSize = jitCode->common.frameRegisterCount;
    unsigned baselineFrameSize = entry->m_expectedValues.numberOfLocals();
    unsigned maxFrameSize = std::max(frameSize, baselineFrameSize);

    Register* scratch = bitwise_cast<Register*>(vm->scratchBufferForSize(sizeof(Register) * (2 + JSStack::CallFrameHeaderSize + maxFrameSize))->dataBuffer());
    
    *bitwise_cast<size_t*>(scratch + 0) = frameSize;
    
    void* targetPC = codeBlock->jitCode()->executableAddressAtOffset(entry->m_machineCodeOffset);
    if (Options::verboseOSR())
        dataLogF("    OSR using target PC %p.\n", targetPC);
    RELEASE_ASSERT(targetPC);
    *bitwise_cast<void**>(scratch + 1) = targetPC;
    
    Register* pivot = scratch + 2 + JSStack::CallFrameHeaderSize;
    
    for (int index = -JSStack::CallFrameHeaderSize; index < static_cast<int>(baselineFrameSize); ++index) {
        VirtualRegister reg(-1 - index);
        
        if (reg.isLocal()) {
            if (entry->m_localsForcedDouble.get(reg.toLocal())) {
                *bitwise_cast<double*>(pivot + index) = exec->registers()[reg.offset()].asanUnsafeJSValue().asNumber();
                continue;
            }
            
            if (entry->m_localsForcedAnyInt.get(reg.toLocal())) {
                *bitwise_cast<int64_t*>(pivot + index) = exec->registers()[reg.offset()].asanUnsafeJSValue().asAnyInt() << JSValue::int52ShiftAmount;
                continue;
            }
        }
        
        pivot[index] = exec->registers()[reg.offset()].asanUnsafeJSValue();
    }
    
    // 4) Reshuffle those registers that need reshuffling.
    Vector<JSValue> temporaryLocals(entry->m_reshufflings.size());
    for (unsigned i = entry->m_reshufflings.size(); i--;)
        temporaryLocals[i] = pivot[VirtualRegister(entry->m_reshufflings[i].fromOffset).toLocal()].asanUnsafeJSValue();
    for (unsigned i = entry->m_reshufflings.size(); i--;)
        pivot[VirtualRegister(entry->m_reshufflings[i].toOffset).toLocal()] = temporaryLocals[i];
    
    // 5) Clear those parts of the call frame that the DFG ain't using. This helps GC on
    //    some programs by eliminating some stale pointer pathologies.
    for (unsigned i = frameSize; i--;) {
        if (entry->m_machineStackUsed.get(i))
            continue;
        pivot[i] = JSValue();
    }

    // 6) Copy our callee saves to buffer.
#if NUMBER_OF_CALLEE_SAVES_REGISTERS > 0
    RegisterAtOffsetList* registerSaveLocations = codeBlock->calleeSaveRegisters();
    RegisterAtOffsetList* allCalleeSaves = vm->getAllCalleeSaveRegisterOffsets();
    RegisterSet dontSaveRegisters = RegisterSet(RegisterSet::stackRegisters(), RegisterSet::allFPRs());

    unsigned registerCount = registerSaveLocations->size();
    VMEntryRecord* record = vmEntryRecord(vm->topVMEntryFrame);
    for (unsigned i = 0; i < registerCount; i++) {
        RegisterAtOffset currentEntry = registerSaveLocations->at(i);
        if (dontSaveRegisters.get(currentEntry.reg()))
            continue;
        RegisterAtOffset* calleeSavesEntry = allCalleeSaves->find(currentEntry.reg());
        
        *(bitwise_cast<intptr_t*>(pivot - 1) - currentEntry.offsetAsIndex()) = record->calleeSaveRegistersBuffer[calleeSavesEntry->offsetAsIndex()];
    }
#endif
    
    // 7) Fix the call frame to have the right code block.
    
    *bitwise_cast<CodeBlock**>(pivot - 1 - JSStack::CodeBlock) = codeBlock;
    
    if (Options::verboseOSR())
        dataLogF("    OSR returning data buffer %p.\n", scratch);
    return scratch;
}
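
For reference, the scratch buffer assembled in steps 3 through 7 has the following layout, reconstructed from the code above (offsets are in Register-sized slots, with pivot = scratch + 2 + JSStack::CallFrameHeaderSize):

//   scratch[0]                        frameSize, as a size_t
//   scratch[1]                        targetPC for the entry thunk to jump to
//   scratch[2] .. pivot[-1]           call frame header; the CodeBlock slot is
//                                     patched in step 7, and callee saves are
//                                     written at their recorded offsets in step 6
//   pivot[0] .. pivot[frameSize - 1]  locals: raw doubles or shifted Int52s
//                                     where a format is forced, plain JSValues
//                                     otherwise, and JSValue() for slots the
//                                     DFG does not use (cleared in step 5)
//
// A non-null return commits the caller to completing OSR entry.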
Code Example #6
void StackVisitor::Frame::dump(PrintStream& out, Indenter indent, WTF::Function<void(PrintStream&)> prefix) const
{
    if (!this->callFrame()) {
        out.print(indent, "frame 0x0\n");
        return;
    }

    CodeBlock* codeBlock = this->codeBlock();
    out.print(indent);
    prefix(out);
    out.print("frame ", RawPointer(this->callFrame()), " {\n");

    {
        indent++;

        CallFrame* callFrame = m_callFrame;
        CallFrame* callerFrame = this->callerFrame();
        const void* returnPC = callFrame->hasReturnPC() ? callFrame->returnPC().value() : nullptr;

        out.print(indent, "name: ", functionName(), "\n");
        out.print(indent, "sourceURL: ", sourceURL(), "\n");

        bool isInlined = false;
#if ENABLE(DFG_JIT)
        isInlined = isInlinedFrame();
        out.print(indent, "isInlinedFrame: ", isInlinedFrame(), "\n");
        if (isInlinedFrame())
            out.print(indent, "InlineCallFrame: ", RawPointer(m_inlineCallFrame), "\n");
#endif

        out.print(indent, "callee: ", RawPointer(callee().rawPtr()), "\n");
        out.print(indent, "returnPC: ", RawPointer(returnPC), "\n");
        out.print(indent, "callerFrame: ", RawPointer(callerFrame), "\n");
        uintptr_t locationRawBits = callFrame->callSiteAsRawBits();
        out.print(indent, "rawLocationBits: ", locationRawBits,
            " ", RawPointer(reinterpret_cast<void*>(locationRawBits)), "\n");
        out.print(indent, "codeBlock: ", RawPointer(codeBlock));
        if (codeBlock)
            out.print(" ", *codeBlock);
        out.print("\n");
        if (codeBlock && !isInlined) {
            indent++;

            if (callFrame->callSiteBitsAreBytecodeOffset()) {
                unsigned bytecodeOffset = callFrame->bytecodeOffset();
                out.print(indent, "bytecodeOffset: ", bytecodeOffset, " of ", codeBlock->instructions().size(), "\n");
#if ENABLE(DFG_JIT)
            } else {
                out.print(indent, "hasCodeOrigins: ", codeBlock->hasCodeOrigins(), "\n");
                if (codeBlock->hasCodeOrigins()) {
                    CallSiteIndex callSiteIndex = callFrame->callSiteIndex();
                    out.print(indent, "callSiteIndex: ", callSiteIndex.bits(), " of ", codeBlock->codeOrigins().size(), "\n");

                    JITCode::JITType jitType = codeBlock->jitType();
                    if (jitType != JITCode::FTLJIT) {
                        JITCode* jitCode = codeBlock->jitCode().get();
                        out.print(indent, "jitCode: ", RawPointer(jitCode),
                            " start ", RawPointer(jitCode->start()),
                            " end ", RawPointer(jitCode->end()), "\n");
                    }
                }
#endif
            }
            unsigned line = 0;
            unsigned column = 0;
            computeLineAndColumn(line, column);
            out.print(indent, "line: ", line, "\n");
            out.print(indent, "column: ", column, "\n");

            indent--;
        }
        out.print(indent, "EntryFrame: ", RawPointer(m_entryFrame), "\n");
        indent--;
    }
    out.print(indent, "}\n");
}
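
Indenter here is a printable indentation counter: printing it emits the current indentation, and ++/-- adjust the depth around nested scopes. A minimal sketch of the assumed shape (the WTF class may differ in detail):

#include <string>

// Assumed shape of Indenter: a depth counter that renders as whitespace.
class Indenter {
public:
    explicit Indenter(unsigned count = 0) : m_count(count) { }

    Indenter& operator++() { ++m_count; return *this; }
    Indenter& operator--() { --m_count; return *this; }
    Indenter operator++(int) { Indenter old(*this); ++m_count; return old; }
    Indenter operator--(int) { Indenter old(*this); --m_count; return old; }

    // Rendered when the indenter is passed to out.print(indent, ...).
    std::string toString() const { return std::string(m_count * 4, ' '); }

private:
    unsigned m_count;
};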
Code Example #7
File: StackVisitor.cpp  Project: EQ4/webkit-mips
void StackVisitor::Frame::print(int indent)
{
    if (!this->callFrame()) {
        log(indent, "frame 0x0\n");
        return;
    }

    CodeBlock* codeBlock = this->codeBlock();
    logF(indent, "frame %p {\n", this->callFrame());

    {
        indent++;

        CallFrame* callFrame = m_callFrame;
        CallFrame* callerFrame = this->callerFrame();
        void* returnPC = callFrame->hasReturnPC() ? callFrame->returnPC().value() : nullptr;

        log(indent, "name: ", functionName(), "\n");
        log(indent, "sourceURL: ", sourceURL(), "\n");

        bool isInlined = false;
#if ENABLE(DFG_JIT)
        isInlined = isInlinedFrame();
        log(indent, "isInlinedFrame: ", isInlinedFrame(), "\n");
        if (isInlinedFrame())
            logF(indent, "InlineCallFrame: %p\n", m_inlineCallFrame);
#endif

        logF(indent, "callee: %p\n", callee());
        logF(indent, "returnPC: %p\n", returnPC);
        logF(indent, "callerFrame: %p\n", callerFrame);
        unsigned locationRawBits = callFrame->callSiteAsRawBits();
        logF(indent, "rawLocationBits: %u 0x%x\n", locationRawBits, locationRawBits);
        logF(indent, "codeBlock: %p ", codeBlock);
        if (codeBlock)
            dataLog(*codeBlock);
        dataLog("\n");
        if (codeBlock && !isInlined) {
            indent++;

            if (callFrame->callSiteBitsAreBytecodeOffset()) {
                unsigned bytecodeOffset = callFrame->bytecodeOffset();
                log(indent, "bytecodeOffset: ", bytecodeOffset, " of ", codeBlock->instructions().size(), "\n");
#if ENABLE(DFG_JIT)
            } else {
                log(indent, "hasCodeOrigins: ", codeBlock->hasCodeOrigins(), "\n");
                if (codeBlock->hasCodeOrigins()) {
                    CallSiteIndex callSiteIndex = callFrame->callSiteIndex();
                    log(indent, "callSiteIndex: ", callSiteIndex.bits(), " of ", codeBlock->codeOrigins().size(), "\n");

                    JITCode::JITType jitType = codeBlock->jitType();
                    if (jitType != JITCode::FTLJIT) {
                        JITCode* jitCode = codeBlock->jitCode().get();
                        logF(indent, "jitCode: %p start %p end %p\n", jitCode, jitCode->start(), jitCode->end());
                    }
                }
#endif
            }
            unsigned line = 0;
            unsigned column = 0;
            computeLineAndColumn(line, column);
            log(indent, "line: ", line, "\n");
            log(indent, "column: ", column, "\n");

            indent--;
        }
        indent--;
    }
    log(indent, "}\n");
}
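
These frame printers are typically driven by walking the stack with StackVisitor. A hedged usage sketch, noting that the visit entry point and functor signature vary across WebKit revisions:

// Dump every frame reachable from the current call frame. `visitor->`
// yields the StackVisitor::Frame on which print() is defined.
static void dumpStack(CallFrame* topCallFrame)
{
    StackVisitor::visit(topCallFrame, [] (StackVisitor& visitor) -> StackVisitor::Status {
        visitor->print(1);
        return StackVisitor::Continue;
    });
}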