Example #1
void JITCode::reconstruct(
    ExecState* exec, CodeBlock* codeBlock, CodeOrigin codeOrigin, unsigned streamIndex,
    Operands<JSValue>& result)
{
    Operands<ValueRecovery> recoveries;
    reconstruct(codeBlock, codeOrigin, streamIndex, recoveries);
    
    result = Operands<JSValue>(OperandsLike, recoveries);
    for (size_t i = result.size(); i--;) {
        int operand = result.operandForIndex(i);
        
        if (operandIsArgument(operand)
            && !VirtualRegister(operand).toArgument()
            && codeBlock->codeType() == FunctionCode
            && codeBlock->specializationKind() == CodeForConstruct) {
            // Ugh. If we're in a constructor, the 'this' argument may hold garbage. It will
            // also never be used. It doesn't matter what we put into the value for this,
            // but it has to be an actual value that can be grokked by subsequent DFG passes,
            // so we sanitize it here by turning it into Undefined.
            result[i] = jsUndefined();
            continue;
        }
        
        ValueRecovery recovery = recoveries[i];
        JSValue value;
        switch (recovery.technique()) {
        case AlreadyInJSStack:
        case AlreadyInJSStackAsUnboxedCell:
        case AlreadyInJSStackAsUnboxedBoolean:
            value = exec->r(operand).jsValue();
            break;
        case AlreadyInJSStackAsUnboxedInt32:
            value = jsNumber(exec->r(operand).unboxedInt32());
            break;
        case AlreadyInJSStackAsUnboxedInt52:
            value = jsNumber(exec->r(operand).unboxedInt52());
            break;
        case AlreadyInJSStackAsUnboxedDouble:
            value = jsDoubleNumber(exec->r(operand).unboxedDouble());
            break;
        case Constant:
            value = recovery.constant();
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
        result[i] = value;
    }
}
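
Every listing here revolves around the Operands<T> container, which stores a frame's arguments and locals in one flat array and exposes them through size(), operandForIndex(), numberOfArguments(), numberOfLocals(), argument() and local(). The sketch below is a deliberately simplified, self-contained approximation of that shape; the operand encoding and everything else in it is illustrative, not JSC's actual implementation.

#include <cassert>
#include <cstddef>
#include <vector>

template<typename T>
class SimpleOperands {
public:
    SimpleOperands(size_t numArguments, size_t numLocals)
        : m_numArguments(numArguments)
        , m_values(numArguments + numLocals)
    {
    }

    size_t size() const { return m_values.size(); }
    size_t numberOfArguments() const { return m_numArguments; }
    size_t numberOfLocals() const { return m_values.size() - m_numArguments; }

    T& argument(size_t i) { assert(i < m_numArguments); return m_values[i]; }
    T& local(size_t i) { return m_values[m_numArguments + i]; }

    // One possible operand encoding: non-negative numbers for arguments,
    // negative numbers for locals. JSC's real numbering differs.
    int operandForIndex(size_t index) const
    {
        if (index < m_numArguments)
            return static_cast<int>(index);
        return -1 - static_cast<int>(index - m_numArguments);
    }

    T& operator[](size_t index) { return m_values[index]; }

private:
    size_t m_numArguments;
    std::vector<T> m_values;
};

With a container like this, the reverse loop for (size_t i = result.size(); i--;) from Example #1 visits every slot exactly once and stops cleanly at index 0, which is why that idiom recurs throughout these functions.
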
Example #2
 void clobber(const Operands<VariableAccessData*>& live)
 {
     for (size_t i = live.size(); i--;) {
         VariableAccessData* variable = live[i];
         if (!variable)
             continue;
         noticeClobber(variable);
     }
 }
void* prepareOSREntry(
    ExecState* exec, CodeBlock* dfgCodeBlock, CodeBlock* entryCodeBlock,
    unsigned bytecodeIndex, unsigned streamIndex)
{
    VM& vm = exec->vm();
    CodeBlock* baseline = dfgCodeBlock->baselineVersion();
    DFG::JITCode* dfgCode = dfgCodeBlock->jitCode()->dfg();
    ForOSREntryJITCode* entryCode = entryCodeBlock->jitCode()->ftlForOSREntry();
    
    if (Options::verboseOSR()) {
        dataLog(
            "FTL OSR from ", *dfgCodeBlock, " to ", *entryCodeBlock, " at bc#",
            bytecodeIndex, ".\n");
    }
    
    if (bytecodeIndex != entryCode->bytecodeIndex()) {
        if (Options::verboseOSR())
            dataLog("    OSR failed because we don't have an entrypoint for bc#", bytecodeIndex, "; ours is for bc#", entryCode->bytecodeIndex());
        return 0;
    }
    
    Operands<JSValue> values;
    dfgCode->reconstruct(
        exec, dfgCodeBlock, CodeOrigin(bytecodeIndex), streamIndex, values);
    
    if (Options::verboseOSR())
        dataLog("    Values at entry: ", values, "\n");
    
    for (int argument = values.numberOfArguments(); argument--;) {
        RELEASE_ASSERT(
            exec->r(virtualRegisterForArgument(argument).offset()).jsValue() == values.argument(argument));
    }
    
    RELEASE_ASSERT(
        static_cast<int>(values.numberOfLocals()) == baseline->m_numCalleeRegisters);
    
    EncodedJSValue* scratch = static_cast<EncodedJSValue*>(
        entryCode->entryBuffer()->dataBuffer());
    
    for (int local = values.numberOfLocals(); local--;)
        scratch[local] = JSValue::encode(values.local(local));
    
    int stackFrameSize = entryCode->common.requiredRegisterCountForExecutionAndExit();
    if (!vm.interpreter->stack().grow(&exec->registers()[virtualRegisterForLocal(stackFrameSize).offset()])) {
        if (Options::verboseOSR())
            dataLog("    OSR failed because stack growth failed.\n");
        return 0;
    }
    
    exec->setCodeBlock(entryCodeBlock);
    
    void* result = entryCode->addressForCall().executableAddress();
    if (Options::verboseOSR())
        dataLog("    Entry will succeed, going to address", RawPointer(result), "\n");
    
    return result;
}
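
The overall shape of prepareOSREntry above is: validate preconditions, stage the reconstructed values into a side buffer, make sure the resources the new frame needs are available, and only then commit by switching code blocks and returning an entry address. A small self-contained sketch of that pattern follows; all names and types in it are hypothetical stand-ins, not JSC API.

#include <cstddef>
#include <cstdint>
#include <vector>

struct EntryStub {
    unsigned bytecodeIndex; // the only loop head this stub can enter at
    void* entryAddress;
};

// Returns the address to jump to, or nullptr if entry is not possible.
// 'scratch' is filled with the values the entry stub expects to find.
void* tryPrepareEntry(
    const EntryStub& stub, unsigned requestedBytecodeIndex,
    const std::vector<uint64_t>& reconstructedLocals,
    std::vector<uint64_t>& scratch, size_t availableStackSlots,
    size_t requiredStackSlots)
{
    // 1) The compiled stub only has an entry point for one bytecode index.
    if (requestedBytecodeIndex != stub.bytecodeIndex)
        return nullptr;

    // 2) Stage the reconstructed locals into the scratch buffer; the stub
    //    will pick them up from there after the jump.
    scratch.assign(reconstructedLocals.begin(), reconstructedLocals.end());

    // 3) Make sure the new frame fits before committing to the transition.
    if (requiredStackSlots > availableStackSlots)
        return nullptr;

    // 4) Only now is it safe to redirect execution.
    return stub.entryAddress;
}
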
Example #4
 void checkOperand(
     BlockIndex blockIndex, Operands<size_t>& getLocalPositions,
     Operands<size_t>& setLocalPositions, int operand)
 {
     if (getLocalPositions.operand(operand) == notSet)
         return;
     if (setLocalPositions.operand(operand) == notSet)
         return;
     
     BasicBlock* block = m_graph.m_blocks[blockIndex].get();
     
     VALIDATE(
         (block->at(getLocalPositions.operand(operand)),
          block->at(setLocalPositions.operand(operand)),
          blockIndex),
         getLocalPositions.operand(operand) < setLocalPositions.operand(operand));
 }
Example #5
SUPPRESS_ASAN
void* prepareOSREntry(
    ExecState* exec, CodeBlock* dfgCodeBlock, CodeBlock* entryCodeBlock,
    unsigned bytecodeIndex, unsigned streamIndex)
{
    VM& vm = exec->vm();
    CodeBlock* baseline = dfgCodeBlock->baselineVersion();
    ExecutableBase* executable = dfgCodeBlock->ownerExecutable();
    DFG::JITCode* dfgCode = dfgCodeBlock->jitCode()->dfg();
    ForOSREntryJITCode* entryCode = entryCodeBlock->jitCode()->ftlForOSREntry();
    
    if (Options::verboseOSR()) {
        dataLog(
            "FTL OSR from ", *dfgCodeBlock, " to ", *entryCodeBlock, " at bc#",
            bytecodeIndex, ".\n");
    }
    
    if (bytecodeIndex)
        jsCast<ScriptExecutable*>(executable)->setDidTryToEnterInLoop(true);

    if (bytecodeIndex != entryCode->bytecodeIndex()) {
        if (Options::verboseOSR())
            dataLog("    OSR failed because we don't have an entrypoint for bc#", bytecodeIndex, "; ours is for bc#", entryCode->bytecodeIndex(), "\n");
        return 0;
    }
    
    Operands<JSValue> values;
    dfgCode->reconstruct(
        exec, dfgCodeBlock, CodeOrigin(bytecodeIndex), streamIndex, values);
    
    if (Options::verboseOSR())
        dataLog("    Values at entry: ", values, "\n");
    
    for (int argument = values.numberOfArguments(); argument--;) {
        JSValue valueOnStack = exec->r(virtualRegisterForArgument(argument).offset()).asanUnsafeJSValue();
        JSValue reconstructedValue = values.argument(argument);
        if (valueOnStack == reconstructedValue || !argument)
            continue;
        dataLog("Mismatch between reconstructed values and the the value on the stack for argument arg", argument, " for ", *entryCodeBlock, " at bc#", bytecodeIndex, ":\n");
        dataLog("    Value on stack: ", valueOnStack, "\n");
        dataLog("    Reconstructed value: ", reconstructedValue, "\n");
        RELEASE_ASSERT_NOT_REACHED();
    }
    
    RELEASE_ASSERT(
        static_cast<int>(values.numberOfLocals()) == baseline->m_numCalleeRegisters);
    
    EncodedJSValue* scratch = static_cast<EncodedJSValue*>(
        entryCode->entryBuffer()->dataBuffer());
    
    for (int local = values.numberOfLocals(); local--;)
        scratch[local] = JSValue::encode(values.local(local));
    
    int stackFrameSize = entryCode->common.requiredRegisterCountForExecutionAndExit();
    if (!vm.interpreter->stack().ensureCapacityFor(&exec->registers()[virtualRegisterForLocal(stackFrameSize - 1).offset()])) {
        if (Options::verboseOSR())
            dataLog("    OSR failed because stack growth failed.\n");
        return 0;
    }
    
    exec->setCodeBlock(entryCodeBlock);
    
    void* result = entryCode->addressForCall(
        vm, executable, ArityCheckNotRequired,
        RegisterPreservationNotRequired).executableAddress();
    if (Options::verboseOSR())
        dataLog("    Entry will succeed, going to address", RawPointer(result), "\n");
    
    return result;
}
 bool run()
 {
     ASSERT(m_graph.m_form == SSA);
     
     for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
         BasicBlock* block = m_graph.block(blockIndex);
         if (!block)
             continue;
         block->ssa->availabilityAtHead.fill(Availability());
         block->ssa->availabilityAtTail.fill(Availability());
     }
     
     BasicBlock* root = m_graph.block(0);
     for (unsigned argument = root->ssa->availabilityAtHead.numberOfArguments(); argument--;) {
         root->ssa->availabilityAtHead.argument(argument) =
             Availability::unavailable().withFlush(
                 FlushedAt(FlushedJSValue, virtualRegisterForArgument(argument)));
     }
     for (unsigned local = root->ssa->availabilityAtHead.numberOfLocals(); local--;)
         root->ssa->availabilityAtHead.local(local) = Availability::unavailable();
     
     if (m_graph.m_plan.mode == FTLForOSREntryMode) {
         for (unsigned local = m_graph.m_profiledBlock->m_numCalleeRegisters; local--;) {
             root->ssa->availabilityAtHead.local(local) =
                 Availability::unavailable().withFlush(
                     FlushedAt(FlushedJSValue, virtualRegisterForLocal(local)));
         }
     }
     
     // This could be made more efficient by processing blocks in reverse postorder.
     Operands<Availability> availability;
     bool changed;
     do {
         changed = false;
         
         for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
             BasicBlock* block = m_graph.block(blockIndex);
             if (!block)
                 continue;
             
             availability = block->ssa->availabilityAtHead;
             
             for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                 Node* node = block->at(nodeIndex);
                 
                 switch (node->op()) {
                 case SetLocal: {
                     VariableAccessData* variable = node->variableAccessData();
                     availability.operand(variable->local()) =
                         Availability(node->child1().node(), variable->flushedAt());
                     break;
                 }
                     
                 case GetArgument: {
                     VariableAccessData* variable = node->variableAccessData();
                     availability.operand(variable->local()) =
                         Availability(node, variable->flushedAt());
                     break;
                 }
                     
                 case MovHint:
                 case MovHintAndCheck: {
                     VariableAccessData* variable = node->variableAccessData();
                     availability.operand(variable->local()) =
                         Availability(node->child1().node());
                     break;
                 }
                     
                 case ZombieHint: {
                     VariableAccessData* variable = node->variableAccessData();
                     availability.operand(variable->local()) = Availability::unavailable();
                     break;
                 }
                     
                 default:
                     break;
                 }
             }
             
             if (availability == block->ssa->availabilityAtTail)
                 continue;
             
             block->ssa->availabilityAtTail = availability;
             changed = true;
             
             for (unsigned successorIndex = block->numSuccessors(); successorIndex--;) {
                 BasicBlock* successor = block->successor(successorIndex);
                 for (unsigned i = availability.size(); i--;) {
                     successor->ssa->availabilityAtHead[i] = availability[i].merge(
                         successor->ssa->availabilityAtHead[i]);
                 }
             }
         }
     } while (changed);
     
     return true;
 }
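
The run() pass above is a classic forward dataflow fixpoint: seed the availability at the root block's head, push each block's head state through its nodes to compute a tail state, and merge that tail into every successor's head, repeating until nothing changes. The self-contained sketch below shows only that loop structure; the Block and State types are invented placeholders, not JSC's availability lattice.

#include <cstddef>
#include <vector>

struct State {
    int value = 0;
    bool operator==(const State& other) const { return value == other.value; }
    State merge(const State& other) const
    {
        State result;
        result.value = value | other.value; // join of the two states
        return result;
    }
};

struct Block {
    State atHead;
    State atTail;
    std::vector<size_t> successors;
    int effect = 0; // stand-in for the block's transfer function
};

void solveToFixpoint(std::vector<Block>& blocks)
{
    bool changed;
    do {
        changed = false;
        for (Block& block : blocks) {
            // Transfer: apply the block's effect to the state at its head.
            State state = block.atHead;
            state.value |= block.effect;

            if (state == block.atTail)
                continue;
            block.atTail = state;
            changed = true;

            // Merge the new tail state into every successor's head.
            for (size_t succ : block.successors)
                blocks[succ].atHead = state.merge(blocks[succ].atHead);
        }
    } while (changed);
}

As the comment in run() notes, visiting blocks in reverse postorder would converge in fewer passes; the repeat-until-no-change loop is correct either way.
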
Example #7
void compileOSRExit(ExecState* exec)
{
    SamplingRegion samplingRegion("DFG OSR Exit Compilation");
    
    CodeBlock* codeBlock = exec->codeBlock();
    
    ASSERT(codeBlock);
    ASSERT(codeBlock->getJITType() == JITCode::DFGJIT);
    
    JSGlobalData* globalData = &exec->globalData();
    
    uint32_t exitIndex = globalData->osrExitIndex;
    OSRExit& exit = codeBlock->osrExit(exitIndex);
    
    // Make sure all code on our inline stack is JIT compiled. This is necessary since
    // we may opt to inline a code block even before it had ever been compiled by the
    // JIT, but our OSR exit infrastructure currently only works if the target of the
    // OSR exit is JIT code. This could be changed since there is nothing particularly
    // hard about doing an OSR exit into the interpreter, but for now this seems to make
    // sense in that if we're OSR exiting from inlined code of a DFG code block, then
    // probably it's a good sign that the thing we're exiting into is hot. Even more
    // interestingly, since the code was inlined, it may never otherwise get JIT
    // compiled since the act of inlining it may ensure that it otherwise never runs.
    for (CodeOrigin codeOrigin = exit.m_codeOrigin; codeOrigin.inlineCallFrame; codeOrigin = codeOrigin.inlineCallFrame->caller) {
        static_cast<FunctionExecutable*>(codeOrigin.inlineCallFrame->executable.get())
            ->baselineCodeBlockFor(codeOrigin.inlineCallFrame->isCall ? CodeForCall : CodeForConstruct)
            ->jitCompile(exec);
    }
    
    // Compute the value recoveries.
    Operands<ValueRecovery> operands;
    codeBlock->variableEventStream().reconstruct(codeBlock, exit.m_codeOrigin, codeBlock->minifiedDFG(), exit.m_streamIndex, operands);
    
    // There may be an override, for forward speculations.
    if (!!exit.m_valueRecoveryOverride) {
        operands.setOperand(
            exit.m_valueRecoveryOverride->operand, exit.m_valueRecoveryOverride->recovery);
    }
    
    SpeculationRecovery* recovery = 0;
    if (exit.m_recoveryIndex)
        recovery = &codeBlock->speculationRecovery(exit.m_recoveryIndex - 1);

#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLog(
        "Generating OSR exit #", exitIndex, " (seq#", exit.m_streamIndex,
        ", bc#", exit.m_codeOrigin.bytecodeIndex, ", @", exit.m_nodeIndex, ", ",
        exit.m_kind, ") for ", *codeBlock, ".\n");
#endif

    {
        CCallHelpers jit(globalData, codeBlock);
        OSRExitCompiler exitCompiler(jit);

        jit.jitAssertHasValidCallFrame();
        
        if (globalData->m_perBytecodeProfiler && codeBlock->compilation()) {
            Profiler::Database& database = *globalData->m_perBytecodeProfiler;
            Profiler::Compilation* compilation = codeBlock->compilation();
            
            Profiler::OSRExit* profilerExit = compilation->addOSRExit(
                exitIndex, Profiler::OriginStack(database, codeBlock, exit.m_codeOrigin),
                exit.m_kind,
                exit.m_watchpointIndex != std::numeric_limits<unsigned>::max());
            jit.add64(CCallHelpers::TrustedImm32(1), CCallHelpers::AbsoluteAddress(profilerExit->counterAddress()));
        }
        
        exitCompiler.compileExit(exit, operands, recovery);
        
        LinkBuffer patchBuffer(*globalData, &jit, codeBlock);
        exit.m_code = FINALIZE_CODE_IF(
            shouldShowDisassembly(),
            patchBuffer,
            ("DFG OSR exit #%u (bc#%u, @%u, %s) from %s",
                exitIndex, exit.m_codeOrigin.bytecodeIndex, exit.m_nodeIndex,
                exitKindToString(exit.m_kind), toCString(*codeBlock).data()));
    }
    
    {
        RepatchBuffer repatchBuffer(codeBlock);
        repatchBuffer.relink(exit.codeLocationForRepatch(codeBlock), CodeLocationLabel(exit.m_code.code()));
    }
    
    globalData->osrExitJumpDestination = exit.m_code.code().executableAddress();
}
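
compileOSRExit above follows a lazy compile-then-relink pattern: exit code is only generated the first time a given exit is taken, after which the jump at the exit site is repatched so later exits go straight to the generated stub. The following sketch illustrates that idea in miniature using a cached function pointer; it is an analogy under invented names, not how JSC's RepatchBuffer actually works.

#include <cstdio>

using ExitHandler = void (*)();

struct ExitSite {
    ExitHandler compiled = nullptr; // starts out unlinked
};

static void slowPathHandler() { std::puts("exit handled"); }

// Stand-in for "generate machine code for this exit".
static ExitHandler compileHandlerFor(ExitSite&) { return slowPathHandler; }

void takeExit(ExitSite& site)
{
    if (!site.compiled) {
        // First exit through this site: compile the handler and cache it,
        // playing the role of RepatchBuffer::relink in the real code.
        site.compiled = compileHandlerFor(site);
    }
    site.compiled();
}
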
void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecovery>& operands, SpeculationRecovery* recovery)
{
    // 1) Pro-forma stuff.
    if (Options::printEachOSRExit()) {
        SpeculationFailureDebugInfo* debugInfo = new SpeculationFailureDebugInfo;
        debugInfo->codeBlock = m_jit.codeBlock();
        debugInfo->kind = exit.m_kind;
        debugInfo->bytecodeOffset = exit.m_codeOrigin.bytecodeIndex;
        
        m_jit.debugCall(debugOperationPrintSpeculationFailure, debugInfo);
    }
    
    // Need to ensure that the stack pointer accounts for the worst-case stack usage at exit.
    m_jit.addPtr(
        CCallHelpers::TrustedImm32(
            -m_jit.codeBlock()->jitCode()->dfgCommon()->requiredRegisterCountForExit * sizeof(Register)),
        CCallHelpers::framePointerRegister, CCallHelpers::stackPointerRegister);
    
    // 2) Perform speculation recovery. This only comes into play when an operation
    //    starts mutating state before verifying the speculation it has already made.
    
    if (recovery) {
        switch (recovery->type()) {
        case SpeculativeAdd:
            m_jit.sub32(recovery->src(), recovery->dest());
            break;
            
        case BooleanSpeculationCheck:
            break;
            
        default:
            break;
        }
    }

    // 3) Refine some value profile, if appropriate.
    
    if (!!exit.m_jsValueSource) {
        if (exit.m_kind == BadCache || exit.m_kind == BadIndexingType) {
            // If the instruction that this originated from has an array profile, then
            // refine it. If it doesn't, then do nothing. The latter could happen for
            // hoisted checks, or checks emitted for operations that didn't have array
            // profiling - either ops that aren't array accesses at all, or weren't
            // known to be array accesses in the bytecode. The latter case is a FIXME
            // while the former case is an outcome of a CheckStructure not knowing why
            // it was emitted (could be either due to an inline cache of a property
            // access, or due to an array profile).
            
            // Note: We are free to assume that the jsValueSource is already known to
            // be a cell since both BadCache and BadIndexingType exits occur after
            // the cell check would have already happened.
            
            CodeOrigin codeOrigin = exit.m_codeOriginForExitProfile;
            if (ArrayProfile* arrayProfile = m_jit.baselineCodeBlockFor(codeOrigin)->getArrayProfile(codeOrigin.bytecodeIndex)) {
                GPRReg usedRegister1;
                GPRReg usedRegister2;
                if (exit.m_jsValueSource.isAddress()) {
                    usedRegister1 = exit.m_jsValueSource.base();
                    usedRegister2 = InvalidGPRReg;
                } else {
                    usedRegister1 = exit.m_jsValueSource.payloadGPR();
                    if (exit.m_jsValueSource.hasKnownTag())
                        usedRegister2 = InvalidGPRReg;
                    else
                        usedRegister2 = exit.m_jsValueSource.tagGPR();
                }
                
                GPRReg scratch1;
                GPRReg scratch2;
                scratch1 = AssemblyHelpers::selectScratchGPR(usedRegister1, usedRegister2);
                scratch2 = AssemblyHelpers::selectScratchGPR(usedRegister1, usedRegister2, scratch1);
                
#if CPU(ARM64)
                m_jit.pushToSave(scratch1);
                m_jit.pushToSave(scratch2);
#else
                m_jit.push(scratch1);
                m_jit.push(scratch2);
#endif
                
                GPRReg value;
                if (exit.m_jsValueSource.isAddress()) {
                    value = scratch1;
                    m_jit.loadPtr(AssemblyHelpers::Address(exit.m_jsValueSource.asAddress()), value);
                } else
                    value = exit.m_jsValueSource.payloadGPR();
                
                m_jit.loadPtr(AssemblyHelpers::Address(value, JSCell::structureIDOffset()), scratch1);
                m_jit.storePtr(scratch1, arrayProfile->addressOfLastSeenStructureID());
                m_jit.load8(AssemblyHelpers::Address(scratch1, Structure::indexingTypeOffset()), scratch1);
                m_jit.move(AssemblyHelpers::TrustedImm32(1), scratch2);
                m_jit.lshift32(scratch1, scratch2);
                m_jit.or32(scratch2, AssemblyHelpers::AbsoluteAddress(arrayProfile->addressOfArrayModes()));
                
#if CPU(ARM64)
                m_jit.popToRestore(scratch2);
                m_jit.popToRestore(scratch1);
#else
                m_jit.pop(scratch2);
                m_jit.pop(scratch1);
#endif
            }
        }
        
        if (!!exit.m_valueProfile) {
            EncodedJSValue* bucket = exit.m_valueProfile.getSpecFailBucket(0);
        
            if (exit.m_jsValueSource.isAddress()) {
                // Save a register so we can use it.
                GPRReg scratch = AssemblyHelpers::selectScratchGPR(exit.m_jsValueSource.base());
                
#if CPU(ARM64)
                m_jit.pushToSave(scratch);
#else
                m_jit.push(scratch);
#endif

                m_jit.load32(exit.m_jsValueSource.asAddress(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), scratch);
                m_jit.store32(scratch, &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
                m_jit.load32(exit.m_jsValueSource.asAddress(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), scratch);
                m_jit.store32(scratch, &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
                
#if CPU(ARM64)
                m_jit.popToRestore(scratch);
#else
                m_jit.pop(scratch);
#endif
            } else if (exit.m_jsValueSource.hasKnownTag()) {
                m_jit.store32(AssemblyHelpers::TrustedImm32(exit.m_jsValueSource.tag()), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
                m_jit.store32(exit.m_jsValueSource.payloadGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
            } else {
                m_jit.store32(exit.m_jsValueSource.tagGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
                m_jit.store32(exit.m_jsValueSource.payloadGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
            }
        }
    }
    
    // Do a simplified OSR exit. See DFGOSRExitCompiler64.cpp's comment regarding how and why we
    // do this simple approach.

    // 4) Save all state from GPRs into the scratch buffer.
    
    ScratchBuffer* scratchBuffer = m_jit.vm()->scratchBufferForSize(sizeof(EncodedJSValue) * operands.size());
    EncodedJSValue* scratch = scratchBuffer ? static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : 0;
    
    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];
        
        switch (recovery.technique()) {
        case UnboxedInt32InGPR:
        case UnboxedBooleanInGPR:
        case UnboxedCellInGPR:
            m_jit.store32(
                recovery.gpr(),
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload);
            break;
            
        case InPair:
            m_jit.store32(
                recovery.tagGPR(),
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.tag);
            m_jit.store32(
                recovery.payloadGPR(),
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload);
            break;
            
        default:
            break;
        }
    }
    
    // Now all GPRs are free to reuse.
    
    // 5) Save all state from FPRs into the scratch buffer.
    
    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];
        
        switch (recovery.technique()) {
        case InFPR:
            m_jit.move(AssemblyHelpers::TrustedImmPtr(scratch + index), GPRInfo::regT0);
            m_jit.storeDouble(recovery.fpr(), MacroAssembler::Address(GPRInfo::regT0));
            break;
            
        default:
            break;
        }
    }
    
    // Now all FPRs are free to reuse.
    
    // 6) Save all state from the stack into the scratch buffer. For simplicity we
    //    do this even for state that's already in the right place on the stack.
    //    It makes things simpler later.
    
    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];
        
        switch (recovery.technique()) {
        case DisplacedInJSStack:
        case Int32DisplacedInJSStack:
        case DoubleDisplacedInJSStack:
        case CellDisplacedInJSStack:
        case BooleanDisplacedInJSStack:
            m_jit.load32(
                AssemblyHelpers::tagFor(recovery.virtualRegister()),
                GPRInfo::regT0);
            m_jit.load32(
                AssemblyHelpers::payloadFor(recovery.virtualRegister()),
                GPRInfo::regT1);
            m_jit.store32(
                GPRInfo::regT0,
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.tag);
            m_jit.store32(
                GPRInfo::regT1,
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload);
            break;
            
        default:
            break;
        }
    }
    
    // 7) Do all data format conversions and store the results into the stack.
    
    bool haveArguments = false;
    
    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];
        int operand = operands.operandForIndex(index);
        
        switch (recovery.technique()) {
        case InPair:
        case DisplacedInJSStack:
            m_jit.load32(
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.tag,
                GPRInfo::regT0);
            m_jit.load32(
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload,
                GPRInfo::regT1);
            m_jit.store32(
                GPRInfo::regT0,
                AssemblyHelpers::tagFor(operand));
            m_jit.store32(
                GPRInfo::regT1,
                AssemblyHelpers::payloadFor(operand));
            break;
            
        case InFPR:
        case DoubleDisplacedInJSStack:
            m_jit.move(AssemblyHelpers::TrustedImmPtr(scratch + index), GPRInfo::regT0);
            m_jit.loadDouble(GPRInfo::regT0, FPRInfo::fpRegT0);
            m_jit.purifyNaN(FPRInfo::fpRegT0);
            m_jit.storeDouble(FPRInfo::fpRegT0, AssemblyHelpers::addressFor(operand));
            break;

        case UnboxedInt32InGPR:
        case Int32DisplacedInJSStack:
            m_jit.load32(
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload,
                GPRInfo::regT0);
            m_jit.store32(
                AssemblyHelpers::TrustedImm32(JSValue::Int32Tag),
                AssemblyHelpers::tagFor(operand));
            m_jit.store32(
                GPRInfo::regT0,
                AssemblyHelpers::payloadFor(operand));
            break;
            
        case UnboxedCellInGPR:
        case CellDisplacedInJSStack:
            m_jit.load32(
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload,
                GPRInfo::regT0);
            m_jit.store32(
                AssemblyHelpers::TrustedImm32(JSValue::CellTag),
                AssemblyHelpers::tagFor(operand));
            m_jit.store32(
                GPRInfo::regT0,
                AssemblyHelpers::payloadFor(operand));
            break;
            
        case UnboxedBooleanInGPR:
        case BooleanDisplacedInJSStack:
            m_jit.load32(
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload,
                GPRInfo::regT0);
            m_jit.store32(
                AssemblyHelpers::TrustedImm32(JSValue::BooleanTag),
                AssemblyHelpers::tagFor(operand));
            m_jit.store32(
                GPRInfo::regT0,
                AssemblyHelpers::payloadFor(operand));
            break;
            
        case Constant:
            m_jit.store32(
                AssemblyHelpers::TrustedImm32(recovery.constant().tag()),
                AssemblyHelpers::tagFor(operand));
            m_jit.store32(
                AssemblyHelpers::TrustedImm32(recovery.constant().payload()),
                AssemblyHelpers::payloadFor(operand));
            break;
            
        case ArgumentsThatWereNotCreated:
            haveArguments = true;
            m_jit.store32(
                AssemblyHelpers::TrustedImm32(JSValue().tag()),
                AssemblyHelpers::tagFor(operand));
            m_jit.store32(
                AssemblyHelpers::TrustedImm32(JSValue().payload()),
                AssemblyHelpers::payloadFor(operand));
            break;
            
        default:
            break;
        }
    }
    
    // 8) Adjust the old JIT's execute counter. Since we are exiting OSR, we know
    //    that all new calls into this code will go to the new JIT, so the execute
    //    counter only affects call frames that performed OSR exit and call frames
    //    that were still executing the old JIT at the time of another call frame's
    //    OSR exit. We want to ensure that the following is true:
    //
    //    (a) Code that performs an OSR exit gets a chance to reenter optimized
    //        code eventually, since optimized code is faster. But we don't
    //        want to do such reentry too aggressively (see (c) below).
    //
    //    (b) If there is code on the call stack that is still running the old
    //        JIT's code and has never OSR'd, then it should get a chance to
    //        perform OSR entry despite the fact that we've exited.
    //
    //    (c) Code that performs an OSR exit should not immediately retry OSR
    //        entry, since both forms of OSR are expensive. OSR entry is
    //        particularly expensive.
    //
    //    (d) Frequent OSR failures, even those that do not result in the code
    //        running in a hot loop, result in recompilation getting triggered.
    //
    //    To ensure (c), we'd like to set the execute counter to
    //    counterValueForOptimizeAfterWarmUp(). This seems like it would endanger
    //    (a) and (b), since then every OSR exit would delay the opportunity for
    //    every call frame to perform OSR entry. Essentially, if OSR exit happens
    //    frequently and the function has few loops, then the counter will never
    //    become non-negative and OSR entry will never be triggered. OSR entry
    //    will only happen if a loop gets hot in the old JIT, which does a pretty
    //    good job of ensuring (a) and (b). But that doesn't take care of (d),
    //    since each speculation failure would reset the execute counter.
    //    So we check here if the number of speculation failures is significantly
    //    larger than the number of successes (we want 90% success rate), and if
    //    there have been a large enough number of failures. If so, we set the
    //    counter to 0; otherwise we set the counter to
    //    counterValueForOptimizeAfterWarmUp().
    
    handleExitCounts(m_jit, exit);
    
    // 9) Reify inlined call frames.
    
    reifyInlinedCallFrames(m_jit, exit);
    
    // 10) Create arguments if necessary and place them into the appropriate aliased
    //     registers.
    
    if (haveArguments) {
        ArgumentsRecoveryGenerator argumentsRecovery;

        for (size_t index = 0; index < operands.size(); ++index) {
            const ValueRecovery& recovery = operands[index];
            if (recovery.technique() != ArgumentsThatWereNotCreated)
                continue;
            argumentsRecovery.generateFor(
                operands.operandForIndex(index), exit.m_codeOrigin, m_jit);
        }
    }

    // 12) And finish.
    adjustAndJumpToTarget(m_jit, exit);
}
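
Notice that the 32-bit exit compiler above never moves a JSValue as a single unit: it always stores the tag and payload halves separately, via tagFor/payloadFor and the Int32Tag, CellTag and BooleanTag constants. The sketch below shows a simplified split-word value descriptor to make those paired stores easier to read; the layout and the tag constant are illustrative only, not JSC's actual EncodedValueDescriptor.

#include <cstdint>

// Simplified 32-bit value descriptor: one 64-bit slot split into two 32-bit
// halves (little-endian layout assumed here). On a real JSVALUE32_64 build
// the tag distinguishes int32, cell, boolean and double values.
union EncodedValueSketch {
    uint64_t asInt64;
    struct {
        uint32_t payload; // low half
        uint32_t tag;     // high half
    } asBits;
};

const uint32_t Int32TagSketch = 0xfffffffd; // placeholder tag value

inline EncodedValueSketch boxInt32Sketch(int32_t value)
{
    EncodedValueSketch v;
    v.asBits.payload = static_cast<uint32_t>(value);
    v.asBits.tag = Int32TagSketch;
    return v;
}
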
void compileOSRExit(ExecState* exec)
{
    SamplingRegion samplingRegion("DFG OSR Exit Compilation");
    
    CodeBlock* codeBlock = exec->codeBlock();
    
    ASSERT(codeBlock);
    ASSERT(codeBlock->jitType() == JITCode::DFGJIT);
    
    VM* vm = &exec->vm();
    
    // It's sort of preferable that we don't GC while in here. Anyways, doing so wouldn't
    // really be profitable.
    DeferGCForAWhile deferGC(vm->heap);

    uint32_t exitIndex = vm->osrExitIndex;
    OSRExit& exit = codeBlock->jitCode()->dfg()->osrExit[exitIndex];
    
    prepareCodeOriginForOSRExit(exec, exit.m_codeOrigin);
    
    // Compute the value recoveries.
    Operands<ValueRecovery> operands;
    codeBlock->jitCode()->dfg()->variableEventStream.reconstruct(codeBlock, exit.m_codeOrigin, codeBlock->jitCode()->dfg()->minifiedDFG, exit.m_streamIndex, operands);
    
    // There may be an override, for forward speculations.
    if (!!exit.m_valueRecoveryOverride) {
        operands.setOperand(
            exit.m_valueRecoveryOverride->operand, exit.m_valueRecoveryOverride->recovery);
    }
    
    SpeculationRecovery* recovery = 0;
    if (exit.m_recoveryIndex != UINT_MAX)
        recovery = &codeBlock->jitCode()->dfg()->speculationRecovery[exit.m_recoveryIndex];

    {
        CCallHelpers jit(vm, codeBlock);
        OSRExitCompiler exitCompiler(jit);

        jit.jitAssertHasValidCallFrame();
        
        if (vm->m_perBytecodeProfiler && codeBlock->jitCode()->dfgCommon()->compilation) {
            Profiler::Database& database = *vm->m_perBytecodeProfiler;
            Profiler::Compilation* compilation = codeBlock->jitCode()->dfgCommon()->compilation.get();
            
            Profiler::OSRExit* profilerExit = compilation->addOSRExit(
                exitIndex, Profiler::OriginStack(database, codeBlock, exit.m_codeOrigin),
                exit.m_kind, exit.m_kind == UncountableInvalidation);
            jit.add64(CCallHelpers::TrustedImm32(1), CCallHelpers::AbsoluteAddress(profilerExit->counterAddress()));
        }
        
        exitCompiler.compileExit(exit, operands, recovery);
        
        LinkBuffer patchBuffer(*vm, jit, codeBlock);
        exit.m_code = FINALIZE_CODE_IF(
            shouldShowDisassembly() || Options::verboseOSR(),
            patchBuffer,
            ("DFG OSR exit #%u (%s, %s) from %s, with operands = %s",
                exitIndex, toCString(exit.m_codeOrigin).data(),
                exitKindToString(exit.m_kind), toCString(*codeBlock).data(),
                toCString(ignoringContext<DumpContext>(operands)).data()));
    }
    
    {
        RepatchBuffer repatchBuffer(codeBlock);
        repatchBuffer.relink(exit.codeLocationForRepatch(codeBlock), CodeLocationLabel(exit.m_code.code()));
    }
    
    vm->osrExitJumpDestination = exit.m_code.code().executableAddress();
}
Example #10
void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecovery>& operands, SpeculationRecovery* recovery)
{
    // 1) Pro-forma stuff.
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("OSR exit for (");
    for (CodeOrigin codeOrigin = exit.m_codeOrigin; ; codeOrigin = codeOrigin.inlineCallFrame->caller) {
        dataLogF("bc#%u", codeOrigin.bytecodeIndex);
        if (!codeOrigin.inlineCallFrame)
            break;
        dataLogF(" -> %p ", codeOrigin.inlineCallFrame->executable.get());
    }
    dataLogF(")  ");
    dataLog(operands);
#endif

    if (Options::printEachOSRExit()) {
        SpeculationFailureDebugInfo* debugInfo = new SpeculationFailureDebugInfo;
        debugInfo->codeBlock = m_jit.codeBlock();
        
        m_jit.debugCall(debugOperationPrintSpeculationFailure, debugInfo);
    }
    
#if DFG_ENABLE(JIT_BREAK_ON_SPECULATION_FAILURE)
    m_jit.breakpoint();
#endif
    
#if DFG_ENABLE(SUCCESS_STATS)
    static SamplingCounter counter("SpeculationFailure");
    m_jit.emitCount(counter);
#endif
    
    // 2) Perform speculation recovery. This only comes into play when an operation
    //    starts mutating state before verifying the speculation it has already made.
    
    if (recovery) {
        switch (recovery->type()) {
        case SpeculativeAdd:
            m_jit.sub32(recovery->src(), recovery->dest());
            m_jit.or64(GPRInfo::tagTypeNumberRegister, recovery->dest());
            break;
            
        case BooleanSpeculationCheck:
            m_jit.xor64(AssemblyHelpers::TrustedImm32(static_cast<int32_t>(ValueFalse)), recovery->dest());
            break;
            
        default:
            break;
        }
    }

    // 3) Refine some array and/or value profile, if appropriate.
    
    if (!!exit.m_jsValueSource) {
        if (exit.m_kind == BadCache || exit.m_kind == BadIndexingType) {
            // If the instruction that this originated from has an array profile, then
            // refine it. If it doesn't, then do nothing. The latter could happen for
            // hoisted checks, or checks emitted for operations that didn't have array
            // profiling - either ops that aren't array accesses at all, or weren't
            // known to be array accesses in the bytecode. The latter case is a FIXME
            // while the former case is an outcome of a CheckStructure not knowing why
            // it was emitted (could be either due to an inline cache of a property
            // access, or due to an array profile).
            
            CodeOrigin codeOrigin = exit.m_codeOriginForExitProfile;
            if (ArrayProfile* arrayProfile = m_jit.baselineCodeBlockFor(codeOrigin)->getArrayProfile(codeOrigin.bytecodeIndex)) {
                GPRReg usedRegister;
                if (exit.m_jsValueSource.isAddress())
                    usedRegister = exit.m_jsValueSource.base();
                else
                    usedRegister = exit.m_jsValueSource.gpr();
                
                GPRReg scratch1;
                GPRReg scratch2;
                scratch1 = AssemblyHelpers::selectScratchGPR(usedRegister);
                scratch2 = AssemblyHelpers::selectScratchGPR(usedRegister, scratch1);
                
#if CPU(ARM64)
                m_jit.pushToSave(scratch1);
                m_jit.pushToSave(scratch2);
#else
                m_jit.push(scratch1);
                m_jit.push(scratch2);
#endif
                
                GPRReg value;
                if (exit.m_jsValueSource.isAddress()) {
                    value = scratch1;
                    m_jit.loadPtr(AssemblyHelpers::Address(exit.m_jsValueSource.asAddress()), value);
                } else
                    value = exit.m_jsValueSource.gpr();
                
                m_jit.loadPtr(AssemblyHelpers::Address(value, JSCell::structureOffset()), scratch1);
                m_jit.storePtr(scratch1, arrayProfile->addressOfLastSeenStructure());
                m_jit.load8(AssemblyHelpers::Address(scratch1, Structure::indexingTypeOffset()), scratch1);
                m_jit.move(AssemblyHelpers::TrustedImm32(1), scratch2);
                m_jit.lshift32(scratch1, scratch2);
                m_jit.or32(scratch2, AssemblyHelpers::AbsoluteAddress(arrayProfile->addressOfArrayModes()));
                
#if CPU(ARM64)
                m_jit.popToRestore(scratch2);
                m_jit.popToRestore(scratch1);
#else
                m_jit.pop(scratch2);
                m_jit.pop(scratch1);
#endif
            }
        }
        
        if (!!exit.m_valueProfile) {
            EncodedJSValue* bucket = exit.m_valueProfile.getSpecFailBucket(0);
            
            if (exit.m_jsValueSource.isAddress()) {
                // We can't be sure that we have a spare register. So use the tagTypeNumberRegister,
                // since we know how to restore it.
                m_jit.load64(AssemblyHelpers::Address(exit.m_jsValueSource.asAddress()), GPRInfo::tagTypeNumberRegister);
                m_jit.store64(GPRInfo::tagTypeNumberRegister, bucket);
                m_jit.move(AssemblyHelpers::TrustedImm64(TagTypeNumber), GPRInfo::tagTypeNumberRegister);
            } else
                m_jit.store64(exit.m_jsValueSource.gpr(), bucket);
        }
    }
    
    // What follows is an intentionally simple OSR exit implementation that generates
    // fairly poor code but is very easy to hack. In particular, it dumps all state that
    // needs conversion into a scratch buffer so that in step 6, where we actually do the
    // conversions, we know that all temp registers are free to use and the variable is
    // definitely in a well-known spot in the scratch buffer regardless of whether it had
    // originally been in a register or spilled. This allows us to decouple "where was
    // the variable" from "how was it represented". Consider that the
    // Int32DisplacedInJSStack recovery: it tells us that the value is in a
    // particular place and that that place holds an unboxed int32. We have two different
    // places that a value could be (displaced, register) and a bunch of different
    // ways of representing a value. The number of recoveries is two * a bunch. The code
    // below means that we have to have two + a bunch cases rather than two * a bunch.
    // Once we have loaded the value from wherever it was, the reboxing is the same
    // regardless of its location. Likewise, before we do the reboxing, the way we get to
    // the value (i.e. where we load it from) is the same regardless of its type. Because
    // the code below always dumps everything into a scratch buffer first, the two
    // questions become orthogonal, which simplifies adding new types and adding new
    // locations.
    //
    // This raises the question: does using such a suboptimal implementation of OSR exit,
    // where we always emit code to dump all state into a scratch buffer only to then
    // dump it right back into the stack, hurt us in any way? The answer is that OSR exits
    // are rare. Our tiering strategy ensures this. This is because if an OSR exit is
    // taken more than ~100 times, we jettison the DFG code block along with all of its
    // exits. It is impossible for an OSR exit - i.e. the code we compile below - to
    // execute frequently enough for the codegen to matter that much. It probably matters
    // enough that we don't want to turn this into some super-slow function call, but so
    // long as we're generating straight-line code, that code can be pretty bad. Also
    // because we tend to exit only along one OSR exit from any DFG code block - that's an
    // empirical result that we're extremely confident about - the code size of this
    // doesn't matter much. Hence any attempt to optimize the codegen here is just purely
    // harmful to the system: it probably won't reduce either net memory usage or net
    // execution time. It will only prevent us from cleanly decoupling "where was the
    // variable" from "how was it represented", which will make it more difficult to add
    // features in the future and it will make it harder to reason about bugs.

    // 4) Save all state from GPRs into the scratch buffer.
    
    ScratchBuffer* scratchBuffer = m_jit.vm()->scratchBufferForSize(sizeof(EncodedJSValue) * operands.size());
    EncodedJSValue* scratch = scratchBuffer ? static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : 0;
    
    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];
        
        switch (recovery.technique()) {
        case InGPR:
        case UnboxedInt32InGPR:
        case UInt32InGPR:
        case UnboxedInt52InGPR:
        case UnboxedStrictInt52InGPR:
        case UnboxedCellInGPR:
            m_jit.store64(recovery.gpr(), scratch + index);
            break;
            
        default:
            break;
        }
    }
    
    // And voila, all GPRs are free to reuse.
    
    // 5) Save all state from FPRs into the scratch buffer.
    
    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];
        
        switch (recovery.technique()) {
        case InFPR:
            m_jit.move(AssemblyHelpers::TrustedImmPtr(scratch + index), GPRInfo::regT0);
            m_jit.storeDouble(recovery.fpr(), GPRInfo::regT0);
            break;
            
        default:
            break;
        }
    }
    
    // Now, all FPRs are also free.
    
    // 6) Save all state from the stack into the scratch buffer. For simplicity we
    //    do this even for state that's already in the right place on the stack.
    //    It makes things simpler later.

    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];
        
        switch (recovery.technique()) {
        case DisplacedInJSStack:
        case CellDisplacedInJSStack:
        case BooleanDisplacedInJSStack:
        case Int32DisplacedInJSStack:
        case DoubleDisplacedInJSStack:
        case Int52DisplacedInJSStack:
        case StrictInt52DisplacedInJSStack:
            m_jit.load64(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::regT0);
            m_jit.store64(GPRInfo::regT0, scratch + index);
            break;
            
        default:
            break;
        }
    }
    
    // 7) Do all data format conversions and store the results into the stack.
    
    bool haveArguments = false;
    
    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];
        int operand = operands.operandForIndex(index);
        
        switch (recovery.technique()) {
        case InGPR:
        case UnboxedCellInGPR:
        case DisplacedInJSStack:
        case CellDisplacedInJSStack:
        case BooleanDisplacedInJSStack:
            m_jit.load64(scratch + index, GPRInfo::regT0);
            m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(operand));
            break;
            
        case UnboxedInt32InGPR:
        case Int32DisplacedInJSStack:
            m_jit.load64(scratch + index, GPRInfo::regT0);
            m_jit.zeroExtend32ToPtr(GPRInfo::regT0, GPRInfo::regT0);
            m_jit.or64(GPRInfo::tagTypeNumberRegister, GPRInfo::regT0);
            m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(operand));
            break;
            
        case UnboxedInt52InGPR:
        case Int52DisplacedInJSStack:
            m_jit.load64(scratch + index, GPRInfo::regT0);
            m_jit.rshift64(
                AssemblyHelpers::TrustedImm32(JSValue::int52ShiftAmount), GPRInfo::regT0);
            m_jit.boxInt52(GPRInfo::regT0, GPRInfo::regT0, GPRInfo::regT1, FPRInfo::fpRegT0);
            m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(operand));
            break;
            
        case UnboxedStrictInt52InGPR:
        case StrictInt52DisplacedInJSStack:
            m_jit.load64(scratch + index, GPRInfo::regT0);
            m_jit.boxInt52(GPRInfo::regT0, GPRInfo::regT0, GPRInfo::regT1, FPRInfo::fpRegT0);
            m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(operand));
            break;
            
        case UInt32InGPR:
            m_jit.load64(scratch + index, GPRInfo::regT0);
            m_jit.zeroExtend32ToPtr(GPRInfo::regT0, GPRInfo::regT0);
            m_jit.boxInt52(GPRInfo::regT0, GPRInfo::regT0, GPRInfo::regT1, FPRInfo::fpRegT0);
            m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(operand));
            break;
            
        case InFPR:
        case DoubleDisplacedInJSStack:
            m_jit.move(AssemblyHelpers::TrustedImmPtr(scratch + index), GPRInfo::regT0);
            m_jit.loadDouble(GPRInfo::regT0, FPRInfo::fpRegT0);
            m_jit.boxDouble(FPRInfo::fpRegT0, GPRInfo::regT0);
            m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(operand));
            break;
            
        case Constant:
            m_jit.store64(
                AssemblyHelpers::TrustedImm64(JSValue::encode(recovery.constant())),
                AssemblyHelpers::addressFor(operand));
            break;
            
        case ArgumentsThatWereNotCreated:
            haveArguments = true;
            // We can't restore this yet but we can make sure that the stack appears
            // sane.
            m_jit.store64(
                AssemblyHelpers::TrustedImm64(JSValue::encode(JSValue())),
                AssemblyHelpers::addressFor(operand));
            break;
            
        default:
            break;
        }
    }
    
    // 8) Adjust the old JIT's execute counter. Since we are exiting OSR, we know
    //    that all new calls into this code will go to the new JIT, so the execute
    //    counter only affects call frames that performed OSR exit and call frames
    //    that were still executing the old JIT at the time of another call frame's
    //    OSR exit. We want to ensure that the following is true:
    //
    //    (a) Code that performs an OSR exit gets a chance to reenter optimized
    //        code eventually, since optimized code is faster. But we don't
    //        want to do such reentry too aggressively (see (c) below).
    //
    //    (b) If there is code on the call stack that is still running the old
    //        JIT's code and has never OSR'd, then it should get a chance to
    //        perform OSR entry despite the fact that we've exited.
    //
    //    (c) Code that performs an OSR exit should not immediately retry OSR
    //        entry, since both forms of OSR are expensive. OSR entry is
    //        particularly expensive.
    //
    //    (d) Frequent OSR failures, even those that do not result in the code
    //        running in a hot loop, result in recompilation getting triggered.
    //
    //    To ensure (c), we'd like to set the execute counter to
    //    counterValueForOptimizeAfterWarmUp(). This seems like it would endanger
    //    (a) and (b), since then every OSR exit would delay the opportunity for
    //    every call frame to perform OSR entry. Essentially, if OSR exit happens
    //    frequently and the function has few loops, then the counter will never
    //    become non-negative and OSR entry will never be triggered. OSR entry
    //    will only happen if a loop gets hot in the old JIT, which does a pretty
    //    good job of ensuring (a) and (b). But that doesn't take care of (d),
    //    since each speculation failure would reset the execute counter.
    //    So we check here if the number of speculation failures is significantly
    //    larger than the number of successes (we want 90% success rate), and if
    //    there have been a large enough number of failures. If so, we set the
    //    counter to 0; otherwise we set the counter to
    //    counterValueForOptimizeAfterWarmUp().
    
    handleExitCounts(m_jit, exit);
    
    // 9) Reify inlined call frames.
    
    reifyInlinedCallFrames(m_jit, exit);
    
    // 10) Create arguments if necessary and place them into the appropriate aliased
    //     registers.
    
    if (haveArguments) {
        HashSet<InlineCallFrame*, DefaultHash<InlineCallFrame*>::Hash,
            NullableHashTraits<InlineCallFrame*> > didCreateArgumentsObject;

        for (size_t index = 0; index < operands.size(); ++index) {
            const ValueRecovery& recovery = operands[index];
            if (recovery.technique() != ArgumentsThatWereNotCreated)
                continue;
            int operand = operands.operandForIndex(index);
            // Find the right inline call frame.
            InlineCallFrame* inlineCallFrame = 0;
            for (InlineCallFrame* current = exit.m_codeOrigin.inlineCallFrame;
                 current;
                 current = current->caller.inlineCallFrame) {
                if (current->stackOffset >= operand) {
                    inlineCallFrame = current;
                    break;
                }
            }

            if (!m_jit.baselineCodeBlockFor(inlineCallFrame)->usesArguments())
                continue;
            VirtualRegister argumentsRegister = m_jit.baselineArgumentsRegisterFor(inlineCallFrame);
            if (didCreateArgumentsObject.add(inlineCallFrame).isNewEntry) {
                // We know this call frame optimized out an arguments object that
                // the baseline JIT would have created. Do that creation now.
                if (inlineCallFrame) {
                    m_jit.addPtr(AssemblyHelpers::TrustedImm32(inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister, GPRInfo::regT0);
                    m_jit.setupArguments(GPRInfo::regT0);
                } else
                    m_jit.setupArgumentsExecState();
                m_jit.move(
                    AssemblyHelpers::TrustedImmPtr(
                        bitwise_cast<void*>(operationCreateArguments)),
                    GPRInfo::nonArgGPR0);
                m_jit.call(GPRInfo::nonArgGPR0);
                m_jit.store64(GPRInfo::returnValueGPR, AssemblyHelpers::addressFor(argumentsRegister));
                m_jit.store64(
                    GPRInfo::returnValueGPR,
                    AssemblyHelpers::addressFor(unmodifiedArgumentsRegister(argumentsRegister)));
                m_jit.move(GPRInfo::returnValueGPR, GPRInfo::regT0); // no-op move on almost all platforms.
            }

            m_jit.load64(AssemblyHelpers::addressFor(argumentsRegister), GPRInfo::regT0);
            m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(operand));
        }
    }
    
    // 11) Load the result of the last bytecode operation into regT0.
    
    if (exit.m_lastSetOperand.isValid())
        m_jit.load64(AssemblyHelpers::addressFor(exit.m_lastSetOperand), GPRInfo::cachedResultRegister);
    
    // 12) And finish.
    
    adjustAndJumpToTarget(m_jit, exit);
}
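
The long comment inside compileExit above explains the deliberately simple two-phase scheme: first dump every live value into a uniform scratch buffer regardless of where it currently lives, then rebox each value according to its format and write it to its final stack slot, so that "where was the variable" and "how was it represented" stay decoupled. The sketch below shows only that structure; the formats, the boxing arithmetic, and every name in it are made up for illustration.

#include <cstddef>
#include <cstdint>
#include <vector>

enum class Format { Boxed, RawInt32, RawDouble };

struct Slot {
    Format format;
    uint64_t location; // stand-in for "the bits found in a register or stack slot"
};

std::vector<uint64_t> exitConvert(const std::vector<Slot>& slots)
{
    // Phase 1: copy every live value into a uniform scratch buffer,
    // regardless of its format. After this, all registers would be free.
    std::vector<uint64_t> scratch(slots.size());
    for (size_t i = 0; i < slots.size(); ++i)
        scratch[i] = slots[i].location;

    // Phase 2: rebox each value according to its format and write it to its
    // final destination. The reboxing never needs to know where the value
    // originally lived.
    std::vector<uint64_t> frame(slots.size());
    for (size_t i = 0; i < slots.size(); ++i) {
        switch (slots[i].format) {
        case Format::Boxed:
            frame[i] = scratch[i];
            break;
        case Format::RawInt32:
            frame[i] = scratch[i] | 0xffff000000000000ull; // illustrative tagging
            break;
        case Format::RawDouble:
            frame[i] = scratch[i] + 0x0001000000000000ull; // illustrative offset
            break;
        }
    }
    return frame;
}

This is why adding a new recovery format or a new value location in the real code only adds one case each, rather than one case per combination.
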
void VariableEventStream::reconstruct(
    CodeBlock* codeBlock, CodeOrigin codeOrigin, MinifiedGraph& graph,
    unsigned index, Operands<ValueRecovery>& valueRecoveries) const
{
    ASSERT(codeBlock->jitType() == JITCode::DFGJIT);
    CodeBlock* baselineCodeBlock = codeBlock->baselineVersion();
    
    unsigned numVariables;
    if (codeOrigin.inlineCallFrame)
        numVariables = baselineCodeBlockForInlineCallFrame(codeOrigin.inlineCallFrame)->m_numCalleeRegisters + VirtualRegister(codeOrigin.inlineCallFrame->stackOffset).toLocal() + 1;
    else
        numVariables = baselineCodeBlock->m_numCalleeRegisters;
    
    // Crazy special case: if we're at index == 0 then this must be an argument check
    // failure, in which case all variables are already set up. The recoveries should
    // reflect this.
    if (!index) {
        valueRecoveries = Operands<ValueRecovery>(codeBlock->numParameters(), numVariables);
        for (size_t i = 0; i < valueRecoveries.size(); ++i) {
            valueRecoveries[i] = ValueRecovery::displacedInJSStack(
                VirtualRegister(valueRecoveries.operandForIndex(i)), DataFormatJS);
        }
        return;
    }
    
    // Step 1: Find the last checkpoint, and figure out the number of virtual registers as we go.
    unsigned startIndex = index - 1;
    while (at(startIndex).kind() != Reset)
        startIndex--;
    
    // Step 2: Create a mock-up of the DFG's state and execute the events.
    Operands<ValueSource> operandSources(codeBlock->numParameters(), numVariables);
    for (unsigned i = operandSources.size(); i--;)
        operandSources[i] = ValueSource(SourceIsDead);
    HashMap<MinifiedID, MinifiedGenerationInfo> generationInfos;
    for (unsigned i = startIndex; i < index; ++i) {
        const VariableEvent& event = at(i);
        switch (event.kind()) {
        case Reset:
            // nothing to do.
            break;
        case BirthToFill:
        case BirthToSpill:
        case Birth: {
            MinifiedGenerationInfo info;
            info.update(event);
            generationInfos.add(event.id(), info);
            break;
        }
        case Fill:
        case Spill:
        case Death: {
            HashMap<MinifiedID, MinifiedGenerationInfo>::iterator iter = generationInfos.find(event.id());
            ASSERT(iter != generationInfos.end());
            iter->value.update(event);
            break;
        }
        case MovHintEvent:
            if (operandSources.hasOperand(event.bytecodeRegister()))
                operandSources.setOperand(event.bytecodeRegister(), ValueSource(event.id()));
            break;
        case SetLocalEvent:
            if (operandSources.hasOperand(event.bytecodeRegister()))
                operandSources.setOperand(event.bytecodeRegister(), ValueSource::forDataFormat(event.machineRegister(), event.dataFormat()));
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
    }
    
    // Step 3: Compute value recoveries!
    valueRecoveries = Operands<ValueRecovery>(codeBlock->numParameters(), numVariables);
    for (unsigned i = 0; i < operandSources.size(); ++i) {
        ValueSource& source = operandSources[i];
        if (source.isTriviallyRecoverable()) {
            valueRecoveries[i] = source.valueRecovery();
            continue;
        }
        
        ASSERT(source.kind() == HaveNode);
        MinifiedNode* node = graph.at(source.id());
        MinifiedGenerationInfo info = generationInfos.get(source.id());
        if (!info.alive) {
            valueRecoveries[i] = ValueRecovery::constant(jsUndefined());
            continue;
        }

        if (tryToSetConstantRecovery(valueRecoveries[i], node))
            continue;
        
        ASSERT(info.format != DataFormatNone);
        
        if (info.filled) {
            if (info.format == DataFormatDouble) {
                valueRecoveries[i] = ValueRecovery::inFPR(info.u.fpr, DataFormatDouble);
                continue;
            }
#if USE(JSVALUE32_64)
            if (info.format & DataFormatJS) {
                valueRecoveries[i] = ValueRecovery::inPair(info.u.pair.tagGPR, info.u.pair.payloadGPR);
                continue;
            }
#endif
            valueRecoveries[i] = ValueRecovery::inGPR(info.u.gpr, info.format);
            continue;
        }
        
        valueRecoveries[i] =
            ValueRecovery::displacedInJSStack(static_cast<VirtualRegister>(info.u.virtualReg), info.format);
    }
}
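
The core of reconstruct() above is a replay loop: rewind to the last Reset checkpoint (Step 1), then re-run the recorded variable events against a scratch copy of the register-allocator state (Step 2) before turning that state into recoveries (Step 3). The following is a minimal, self-contained sketch of that replay idea; every name in it (ToyEvent, ToyInfo, replay, and so on) is invented for illustration and is not part of JavaScriptCore.

#include <cstdio>
#include <map>
#include <vector>

// Simplified stand-ins for the event kinds and per-node bookkeeping used above.
enum class ToyKind { Reset, Birth, Fill, Spill, Death, MovHint, SetLocal };

struct ToyEvent {
    ToyKind kind;
    unsigned id;  // which value (node) the event talks about
    int operand;  // which bytecode operand, for MovHint/SetLocal
};

struct ToyInfo {
    bool alive = false;
    bool filled = false;  // currently held in a register
    bool spilled = false; // currently stored on the stack
};

// Replay events from the last Reset checkpoint up to (but excluding) the exit
// index, mirroring how reconstruct() rebuilds a mock-up of the DFG's state.
static void replay(const std::vector<ToyEvent>& events, unsigned exitIndex,
                   std::map<unsigned, ToyInfo>& infos,
                   std::map<int, unsigned>& operandToNode)
{
    unsigned start = exitIndex - 1;
    while (events[start].kind != ToyKind::Reset)
        --start; // walk back to the checkpoint, like Step 1 above

    for (unsigned i = start; i < exitIndex; ++i) {
        const ToyEvent& e = events[i];
        switch (e.kind) {
        case ToyKind::Birth: {
            ToyInfo& info = infos[e.id];
            info.alive = true;
            info.filled = true;
            break;
        }
        case ToyKind::Fill:
            infos[e.id].filled = true;
            break;
        case ToyKind::Spill:
            infos[e.id].spilled = true;
            infos[e.id].filled = false;
            break;
        case ToyKind::Death:
            infos[e.id].alive = false;
            break;
        case ToyKind::MovHint:
            operandToNode[e.operand] = e.id; // this operand now wants node e.id
            break;
        default: // Reset, SetLocal: nothing to do in this toy model
            break;
        }
    }
}

int main()
{
    std::vector<ToyEvent> events = {
        {ToyKind::Reset, 0, 0},
        {ToyKind::Birth, 1, 0},
        {ToyKind::MovHint, 1, -1}, // local -1 should recover node 1
        {ToyKind::Spill, 1, 0},
    };
    std::map<unsigned, ToyInfo> infos;
    std::map<int, unsigned> operandToNode;
    replay(events, (unsigned)events.size(), infos, operandToNode);
    std::printf("node 1 spilled: %d, local -1 maps to node %u\n",
                infos[1].spilled, operandToNode[-1]);
}
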
Example #12
0
void VariableEventStream::reconstruct(
    CodeBlock* codeBlock, CodeOrigin codeOrigin, MinifiedGraph& graph,
    unsigned index, Operands<ValueRecovery>& valueRecoveries) const
{
    ASSERT(codeBlock->getJITType() == JITCode::DFGJIT);
    CodeBlock* baselineCodeBlock = codeBlock->baselineVersion();
    
    unsigned numVariables;
    if (codeOrigin.inlineCallFrame)
        numVariables = baselineCodeBlockForInlineCallFrame(codeOrigin.inlineCallFrame)->m_numCalleeRegisters + codeOrigin.inlineCallFrame->stackOffset;
    else
        numVariables = baselineCodeBlock->m_numCalleeRegisters;
    
    // Crazy special case: if we're at index == 0 then this must be an argument check
    // failure, in which case all variables are already set up. The recoveries should
    // reflect this.
    if (!index) {
        valueRecoveries = Operands<ValueRecovery>(codeBlock->numParameters(), numVariables);
        for (size_t i = 0; i < valueRecoveries.size(); ++i)
            valueRecoveries[i] = ValueRecovery::alreadyInJSStack();
        return;
    }
    
    // Step 1: Find the last checkpoint, and figure out the number of virtual registers as we go.
    unsigned startIndex = index - 1;
    while (at(startIndex).kind() != Reset)
        startIndex--;
    
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("Computing OSR exit recoveries starting at seq#%u.\n", startIndex);
#endif

    // Step 2: Create a mock-up of the DFG's state and execute the events.
    Operands<ValueSource> operandSources(codeBlock->numParameters(), numVariables);
    Vector<MinifiedGenerationInfo, 32> generationInfos(graph.originalGraphSize());
    for (unsigned i = startIndex; i < index; ++i) {
        const VariableEvent& event = at(i);
        switch (event.kind()) {
        case Reset:
            // nothing to do.
            break;
        case BirthToFill:
        case BirthToSpill:
        case Fill:
        case Spill:
        case Death:
            generationInfos[event.nodeIndex()].update(event);
            break;
        case MovHint:
            if (operandSources.hasOperand(event.operand()))
                operandSources.setOperand(event.operand(), ValueSource(event.nodeIndex()));
            break;
        case SetLocalEvent:
            if (operandSources.hasOperand(event.operand()))
                operandSources.setOperand(event.operand(), ValueSource::forDataFormat(event.dataFormat()));
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
    }
    
    // Step 3: Record the things that are live, so we can get to them more quickly.
    Vector<unsigned, 16> indicesOfLiveThings;
    for (unsigned i = 0; i < generationInfos.size(); ++i) {
        if (generationInfos[i].format != DataFormatNone)
            indicesOfLiveThings.append(i);
    }
    
    // Step 4: Compute value recoveries!
    valueRecoveries = Operands<ValueRecovery>(codeBlock->numParameters(), numVariables);
    for (unsigned i = 0; i < operandSources.size(); ++i) {
        ValueSource& source = operandSources[i];
        if (source.isTriviallyRecoverable()) {
            valueRecoveries[i] = source.valueRecovery();
            continue;
        }
        
        ASSERT(source.kind() == HaveNode);
        MinifiedNode* node = graph.at(source.nodeIndex());
        if (node) {
            if (node->hasConstantNumber()) {
                valueRecoveries[i] = ValueRecovery::constant(
                    codeBlock->constantRegister(
                        FirstConstantRegisterIndex + node->constantNumber()).get());
                continue;
            }
            if (node->hasWeakConstant()) {
                valueRecoveries[i] = ValueRecovery::constant(node->weakConstant());
                continue;
            }
            if (node->op() == PhantomArguments) {
                valueRecoveries[i] = ValueRecovery::argumentsThatWereNotCreated();
                continue;
            }
        }
        
        MinifiedGenerationInfo* info = &generationInfos[source.nodeIndex()];
        if (info->format == DataFormatNone) {
            // Try to see if there is an alternate node that would contain the value we want.
            // There are four possibilities:
            //
            // Int32ToDouble: We can use this in place of the original node, but
            //    we'd rather not; so we use it only if it is the only remaining
            //    live version.
            //
            // ValueToInt32: If the only remaining live version of the value is
            //    ValueToInt32, then we can use it.
            //
            // UInt32ToNumber: If the only live version of the value is a UInt32ToNumber
            //    then the only remaining uses are ones that want a properly formed number
            //    rather than a UInt32 intermediate.
            //
            // DoubleAsInt32: Same as UInt32ToNumber.
            //
            // The reverse of the above: This node could be a UInt32ToNumber, but its
            //    alternative is still alive. This means that the only remaining uses of
            //    the number would be fine with a UInt32 intermediate.
            
            bool found = false;
            
            if (node && node->op() == UInt32ToNumber) {
                NodeIndex nodeIndex = node->child1();
                node = graph.at(nodeIndex);
                info = &generationInfos[nodeIndex];
                if (info->format != DataFormatNone)
                    found = true;
            }
            
            if (!found) {
                NodeIndex int32ToDoubleIndex = NoNode;
                NodeIndex valueToInt32Index = NoNode;
                NodeIndex uint32ToNumberIndex = NoNode;
                NodeIndex doubleAsInt32Index = NoNode;
                
                for (unsigned liveIndex = 0; liveIndex < indicesOfLiveThings.size(); ++liveIndex) {
                    NodeIndex nodeIndex = indicesOfLiveThings[liveIndex];
                    node = graph.at(nodeIndex);
                    if (!node)
                        continue;
                    if (!node->hasChild1())
                        continue;
                    if (node->child1() != source.nodeIndex())
                        continue;
                    ASSERT(generationInfos[nodeIndex].format != DataFormatNone);
                    switch (node->op()) {
                    case Int32ToDouble:
                        int32ToDoubleIndex = nodeIndex;
                        break;
                    case ValueToInt32:
                        valueToInt32Index = nodeIndex;
                        break;
                    case UInt32ToNumber:
                        uint32ToNumberIndex = nodeIndex;
                        break;
                    case DoubleAsInt32:
                        doubleAsInt32Index = nodeIndex;
                        break;
                    default:
                        break;
                    }
                }
                
                NodeIndex nodeIndexToUse;
                if (doubleAsInt32Index != NoNode)
                    nodeIndexToUse = doubleAsInt32Index;
                else if (int32ToDoubleIndex != NoNode)
                    nodeIndexToUse = int32ToDoubleIndex;
                else if (valueToInt32Index != NoNode)
                    nodeIndexToUse = valueToInt32Index;
                else if (uint32ToNumberIndex != NoNode)
                    nodeIndexToUse = uint32ToNumberIndex;
                else
                    nodeIndexToUse = NoNode;
                
                if (nodeIndexToUse != NoNode) {
                    info = &generationInfos[nodeIndexToUse];
                    ASSERT(info->format != DataFormatNone);
                    found = true;
                }
            }
            
            if (!found) {
                valueRecoveries[i] = ValueRecovery::constant(jsUndefined());
                continue;
            }
        }
        
        ASSERT(info->format != DataFormatNone);
        
        if (info->filled) {
            if (info->format == DataFormatDouble) {
                valueRecoveries[i] = ValueRecovery::inFPR(info->u.fpr);
                continue;
            }
#if USE(JSVALUE32_64)
            if (info->format & DataFormatJS) {
                valueRecoveries[i] = ValueRecovery::inPair(info->u.pair.tagGPR, info->u.pair.payloadGPR);
                continue;
            }
#endif
            valueRecoveries[i] = ValueRecovery::inGPR(info->u.gpr, info->format);
            continue;
        }
        
        valueRecoveries[i] =
            ValueRecovery::displacedInJSStack(static_cast<VirtualRegister>(info->u.virtualReg), info->format);
    }
    
    // Step 5: Make sure that for locals that coincide with true call frame headers, the exit compiler knows
    // that those values don't have to be recovered. Signal this by using ValueRecovery::alreadyInJSStack().
    for (InlineCallFrame* inlineCallFrame = codeOrigin.inlineCallFrame; inlineCallFrame; inlineCallFrame = inlineCallFrame->caller.inlineCallFrame) {
        for (unsigned i = JSStack::CallFrameHeaderSize; i--;)
            valueRecoveries.setLocal(inlineCallFrame->stackOffset - i - 1, ValueRecovery::alreadyInJSStack());
    }
}
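
The long comment in Step 4 describes a preference order among surviving conversion nodes when the original node's value is no longer live. A minimal sketch of just that ordering is shown below; all names in it (Candidates, pickAlternate, kNoNode) are invented for this illustration, and the branch chain simply mirrors the if/else ladder in the code above.

#include <cstdio>

static const int kNoNode = -1;

// Indices of the live conversion nodes found while scanning indicesOfLiveThings.
struct Candidates {
    int doubleAsInt32 = kNoNode;
    int int32ToDouble = kNoNode;
    int valueToInt32 = kNoNode;
    int uint32ToNumber = kNoNode;
};

// Same preference order as the original: DoubleAsInt32, then Int32ToDouble,
// then ValueToInt32, then UInt32ToNumber. If none survives, the caller falls
// back to ValueRecovery::constant(jsUndefined()).
static int pickAlternate(const Candidates& c)
{
    if (c.doubleAsInt32 != kNoNode)
        return c.doubleAsInt32;
    if (c.int32ToDouble != kNoNode)
        return c.int32ToDouble;
    if (c.valueToInt32 != kNoNode)
        return c.valueToInt32;
    if (c.uint32ToNumber != kNoNode)
        return c.uint32ToNumber;
    return kNoNode;
}

int main()
{
    Candidates c;
    c.valueToInt32 = 7;
    c.uint32ToNumber = 9;
    std::printf("use node %d\n", pickAlternate(c)); // prints 7
}
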
Example #13
0
void OSRExitCompiler::emitRestoreArguments(const Operands<ValueRecovery>& operands)
{
    HashMap<MinifiedID, int> alreadyAllocatedArguments; // Maps phantom arguments node ID to operand.
    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];
        int operand = operands.operandForIndex(index);
        
        if (recovery.technique() != DirectArgumentsThatWereNotCreated
            && recovery.technique() != ClonedArgumentsThatWereNotCreated)
            continue;
        
        MinifiedID id = recovery.nodeID();
        auto iter = alreadyAllocatedArguments.find(id);
        if (iter != alreadyAllocatedArguments.end()) {
            JSValueRegs regs = JSValueRegs::withTwoAvailableRegs(GPRInfo::regT0, GPRInfo::regT1);
            m_jit.loadValue(CCallHelpers::addressFor(iter->value), regs);
            m_jit.storeValue(regs, CCallHelpers::addressFor(operand));
            continue;
        }
        
        InlineCallFrame* inlineCallFrame =
            m_jit.codeBlock()->jitCode()->dfg()->minifiedDFG.at(id)->inlineCallFrame();

        int stackOffset;
        if (inlineCallFrame)
            stackOffset = inlineCallFrame->stackOffset;
        else
            stackOffset = 0;
        
        if (!inlineCallFrame || inlineCallFrame->isClosureCall) {
            m_jit.loadPtr(
                AssemblyHelpers::addressFor(stackOffset + JSStack::Callee),
                GPRInfo::regT0);
        } else {
            m_jit.move(
                AssemblyHelpers::TrustedImmPtr(inlineCallFrame->calleeRecovery.constant().asCell()),
                GPRInfo::regT0);
        }
        
        if (!inlineCallFrame || inlineCallFrame->isVarargs()) {
            m_jit.load32(
                AssemblyHelpers::payloadFor(stackOffset + JSStack::ArgumentCount),
                GPRInfo::regT1);
        } else {
            m_jit.move(
                AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()),
                GPRInfo::regT1);
        }
        
        m_jit.setupArgumentsWithExecState(
            AssemblyHelpers::TrustedImmPtr(inlineCallFrame), GPRInfo::regT0, GPRInfo::regT1);
        switch (recovery.technique()) {
        case DirectArgumentsThatWereNotCreated:
            m_jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(operationCreateDirectArgumentsDuringExit)), GPRInfo::nonArgGPR0);
            break;
        case ClonedArgumentsThatWereNotCreated:
            m_jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(operationCreateClonedArgumentsDuringExit)), GPRInfo::nonArgGPR0);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
        m_jit.call(GPRInfo::nonArgGPR0);
        m_jit.storeCell(GPRInfo::returnValueGPR, AssemblyHelpers::addressFor(operand));
        
        alreadyAllocatedArguments.add(id, operand);
    }
}
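
The alreadyAllocatedArguments map is what keeps emitRestoreArguments() from materializing the same phantom arguments object more than once: the first operand that needs a given MinifiedID takes the slow call, and every later operand aliasing that ID just copies the stored cell. A toy model of that caching is sketched below, with invented names (materializeArguments, recoveries) standing in for the real JIT calls.

#include <cstdio>
#include <map>
#include <string>
#include <utility>

// Placeholder for the slow-path allocation that the real code reaches through
// operationCreateDirectArgumentsDuringExit / operationCreateClonedArgumentsDuringExit.
static std::string materializeArguments(unsigned nodeID)
{
    std::printf("materializing arguments for node %u\n", nodeID);
    return "arguments@" + std::to_string(nodeID);
}

int main()
{
    // (operand, phantom-arguments node id) pairs, in the order the real loop visits them.
    const std::pair<int, unsigned> recoveries[] = {{-3, 5}, {-7, 5}, {-9, 8}};

    std::map<unsigned, std::string> alreadyAllocated; // node id -> materialized value
    std::map<int, std::string> stack;                 // operand -> stored value

    for (const auto& [operand, id] : recoveries) {
        auto iter = alreadyAllocated.find(id);
        if (iter != alreadyAllocated.end()) {
            stack[operand] = iter->second; // cheap copy, no second allocation
            continue;
        }
        std::string value = materializeArguments(id); // slow path, once per node id
        stack[operand] = value;
        alreadyAllocated.emplace(id, value);
    }
    std::printf("operand -7 got %s\n", stack[-7].c_str()); // reused arguments@5
}
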