std::unique_ptr<CryptoAlgorithmParameters> JSCryptoAlgorithmDictionary::createParametersForGenerateKey(ExecState* exec, CryptoAlgorithmIdentifier algorithm, JSValue value)
{
    switch (algorithm) {
    case CryptoAlgorithmIdentifier::RSAES_PKCS1_v1_5:
    case CryptoAlgorithmIdentifier::RSASSA_PKCS1_v1_5:
    case CryptoAlgorithmIdentifier::RSA_PSS:
    case CryptoAlgorithmIdentifier::RSA_OAEP:
        return createRsaKeyGenParams(exec, value);
    case CryptoAlgorithmIdentifier::ECDSA:
    case CryptoAlgorithmIdentifier::ECDH:
        setDOMException(exec, NOT_SUPPORTED_ERR);
        return nullptr;
    case CryptoAlgorithmIdentifier::AES_CTR:
    case CryptoAlgorithmIdentifier::AES_CBC:
    case CryptoAlgorithmIdentifier::AES_CMAC:
    case CryptoAlgorithmIdentifier::AES_GCM:
    case CryptoAlgorithmIdentifier::AES_CFB:
    case CryptoAlgorithmIdentifier::AES_KW:
        return createAesKeyGenParams(exec, value);
    case CryptoAlgorithmIdentifier::HMAC:
        return createHmacKeyParams(exec, value);
    case CryptoAlgorithmIdentifier::DH:
    case CryptoAlgorithmIdentifier::SHA_1:
    case CryptoAlgorithmIdentifier::SHA_224:
    case CryptoAlgorithmIdentifier::SHA_256:
    case CryptoAlgorithmIdentifier::SHA_384:
    case CryptoAlgorithmIdentifier::SHA_512:
    case CryptoAlgorithmIdentifier::CONCAT:
    case CryptoAlgorithmIdentifier::HKDF_CTR:
    case CryptoAlgorithmIdentifier::PBKDF2:
        setDOMException(exec, NOT_SUPPORTED_ERR);
        return nullptr;
    }
    RELEASE_ASSERT_NOT_REACHED();
    return nullptr;
}
Example #2
static JSCell* formatLocaleDate(ExecState* exec, const GregorianDateTime& gdt, LocaleDateTimeFormat format)
{
#if OS(WINDOWS)
    SYSTEMTIME systemTime;
    memset(&systemTime, 0, sizeof(systemTime));
    systemTime.wYear = gdt.year();
    systemTime.wMonth = gdt.month() + 1;
    systemTime.wDay = gdt.monthDay();
    systemTime.wDayOfWeek = gdt.weekDay();
    systemTime.wHour = gdt.hour();
    systemTime.wMinute = gdt.minute();
    systemTime.wSecond = gdt.second();

    Vector<UChar, 128> buffer;
    size_t length = 0;

    if (format == LocaleDate) {
        buffer.resize(GetDateFormatW(LOCALE_USER_DEFAULT, DATE_LONGDATE, &systemTime, 0, 0, 0));
        length = GetDateFormatW(LOCALE_USER_DEFAULT, DATE_LONGDATE, &systemTime, 0, buffer.data(), buffer.size());
    } else if (format == LocaleTime) {
        buffer.resize(GetTimeFormatW(LOCALE_USER_DEFAULT, 0, &systemTime, 0, 0, 0));
        length = GetTimeFormatW(LOCALE_USER_DEFAULT, 0, &systemTime, 0, buffer.data(), buffer.size());
    } else if (format == LocaleDateAndTime) {
        buffer.resize(GetDateFormatW(LOCALE_USER_DEFAULT, DATE_LONGDATE, &systemTime, 0, 0, 0) + GetTimeFormatW(LOCALE_USER_DEFAULT, 0, &systemTime, 0, 0, 0));
        length = GetDateFormatW(LOCALE_USER_DEFAULT, DATE_LONGDATE, &systemTime, 0, buffer.data(), buffer.size());
        if (length) {
            buffer[length - 1] = ' ';
            length += GetTimeFormatW(LOCALE_USER_DEFAULT, 0, &systemTime, 0, buffer.data() + length, buffer.size() - length);
        }
    } else
        RELEASE_ASSERT_NOT_REACHED();

    // Remove the terminating null character.
    if (length)
        length--;

    return jsNontrivialString(exec, String(buffer.data(), length));

#else // OS(WINDOWS)

#if HAVE(LANGINFO_H)
    static const nl_item formats[] = { D_T_FMT, D_FMT, T_FMT };
#else
    static const char* const formatStrings[] = { "%#c", "%#x", "%X" };
#endif

    // Offset year if needed
    struct tm localTM = gdt;
    int year = gdt.year();
    bool yearNeedsOffset = year < 1900 || year > 2038;
    if (yearNeedsOffset)
        localTM.tm_year = equivalentYearForDST(year) - 1900;

#if HAVE(LANGINFO_H)
    // We do not allow strftime to generate dates with 2-digit years, both to
    // avoid ambiguity and to avoid a crash in the strncpy below for years
    // that need the offset.
    char* formatString = strdup(nl_langinfo(formats[format]));
    char* yPos = strchr(formatString, 'y');
    if (yPos)
        *yPos = 'Y';
#endif

    // Do the formatting
    const int bufsize = 128;
    char timebuffer[bufsize];

#if HAVE(LANGINFO_H)
    size_t ret = strftime(timebuffer, bufsize, formatString, &localTM);
    free(formatString);
#else
    size_t ret = strftime(timebuffer, bufsize, formatStrings[format], &localTM);
#endif

    if (ret == 0)
        return jsEmptyString(exec);

    // Patch the original year back into the formatted string.
    if (yearNeedsOffset && format != LocaleTime) {
        static const int yearLen = 5;   // FIXME will be a problem in the year 10,000
        char yearString[yearLen];

        snprintf(yearString, yearLen, "%d", localTM.tm_year + 1900);
        char* yearLocation = strstr(timebuffer, yearString);
        snprintf(yearString, yearLen, "%d", year);

        strncpy(yearLocation, yearString, yearLen - 1);
    }

    // Convert the multi-byte result to Unicode.
    // If __STDC_ISO_10646__ is defined, a wide character represents a
    // UTF-16 (or UTF-32) code point. On most modern Unix-like systems
    // (e.g. Linux with glibc 2.2 and above) the macro is defined, and a
    // wide character represents a UTF-32 code point. Here we static_cast
    // potential UTF-32 to UTF-16; this should be safe because date- and
    // time-related characters in the various languages should all be in
    // the Unicode BMP. If mbstowcs fails, we simply fall back to using
    // the multi-byte result as-is.
#ifdef __STDC_ISO_10646__
    UChar buffer[bufsize];
    wchar_t tempbuffer[bufsize];
    size_t length = mbstowcs(tempbuffer, timebuffer, bufsize - 1);
    if (length != static_cast<size_t>(-1)) {
        for (size_t i = 0; i < length; ++i)
            buffer[i] = static_cast<UChar>(tempbuffer[i]);
        return jsNontrivialString(exec, String(buffer, length));
    }
#endif

    return jsNontrivialString(exec, timebuffer);
#endif // OS(WINDOWS)
}
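The non-Windows path above leans on a classic trick: strftime is only ever handed years it can safely format, and the real year is patched back into the output afterwards. Below is a minimal standalone sketch of that trick, not the WebKit code; equivalentYear() is a hypothetical stand-in for WTF's equivalentYearForDST().

#include <cstdio>
#include <cstring>
#include <ctime>

// Hypothetical stand-in: any year with the same leap-year status and weekday
// alignment would do; WebKit computes this properly via equivalentYearForDST().
static int equivalentYear(int /*year*/)
{
    return 2012;
}

int main()
{
    int year = 2140;
    std::tm t = {};
    t.tm_year = equivalentYear(year) - 1900; // format a year strftime handles well
    t.tm_mon = 6;  // July
    t.tm_mday = 4;

    char buf[128];
    std::strftime(buf, sizeof(buf), "%Y-%m-%d", &t);

    // Patch the substitute year back to the real one, as the code above does
    // with strstr/strncpy. Both years are four digits here, so a plain memcpy
    // over the match is enough.
    char substitute[8];
    char real[8];
    std::snprintf(substitute, sizeof(substitute), "%d", equivalentYear(year));
    std::snprintf(real, sizeof(real), "%d", year);
    if (char* pos = std::strstr(buf, substitute))
        std::memcpy(pos, real, std::strlen(real));

    std::printf("%s\n", buf); // prints 2140-07-04
    return 0;
}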
Example #3
Plan::CompilationPath Plan::compileInThreadImpl(LongLivedState& longLivedState)
{
    if (verboseCompilationEnabled() && osrEntryBytecodeIndex != UINT_MAX) {
        dataLog("\n");
        dataLog("Compiler must handle OSR entry from bc#", osrEntryBytecodeIndex, " with values: ", mustHandleValues, "\n");
        dataLog("\n");
    }
    
    Graph dfg(vm, *this, longLivedState);
    
    if (!parse(dfg)) {
        finalizer = adoptPtr(new FailedFinalizer(*this));
        return FailPath;
    }
    
    // By this point the DFG bytecode parser will have potentially mutated various tables
    // in the CodeBlock. This is a good time to perform an early shrink, which is more
    // powerful than a late one. It's safe to do so because we haven't generated any code
    // that references any of the tables directly, yet.
    codeBlock->shrinkToFit(CodeBlock::EarlyShrink);

    if (validationEnabled())
        validate(dfg);
    
    performCPSRethreading(dfg);
    performUnification(dfg);
    performPredictionInjection(dfg);
    
    if (mode == FTLForOSREntryMode) {
        bool result = performOSREntrypointCreation(dfg);
        if (!result) {
            finalizer = adoptPtr(new FailedFinalizer(*this));
            return FailPath;
        }
        performCPSRethreading(dfg);
    }
    
    if (validationEnabled())
        validate(dfg);
    
    performBackwardsPropagation(dfg);
    performPredictionPropagation(dfg);
    performFixup(dfg);
    performTypeCheckHoisting(dfg);
    
    unsigned count = 1;
    dfg.m_fixpointState = FixpointNotConverged;
    for (;; ++count) {
        if (logCompilationChanges())
            dataLogF("DFG beginning optimization fixpoint iteration #%u.\n", count);
        bool changed = false;
        
        if (validationEnabled())
            validate(dfg);
        
        performCFA(dfg);
        changed |= performConstantFolding(dfg);
        changed |= performArgumentsSimplification(dfg);
        changed |= performCFGSimplification(dfg);
        changed |= performCSE(dfg);
        
        if (!changed)
            break;
        
        performCPSRethreading(dfg);
    }
    
    if (logCompilationChanges())
        dataLogF("DFG optimization fixpoint converged in %u iterations.\n", count);

    dfg.m_fixpointState = FixpointConverged;

    performStoreElimination(dfg);
    
    // If we're doing validation, then run some analyses, to give them an opportunity
    // to self-validate. Now is as good a time as any to do this.
    if (validationEnabled()) {
        dfg.m_dominators.computeIfNecessary(dfg);
        dfg.m_naturalLoops.computeIfNecessary(dfg);
    }

    switch (mode) {
    case DFGMode: {
        performTierUpCheckInjection(dfg);
        break;
    }
    
    case FTLMode:
    case FTLForOSREntryMode: {
#if ENABLE(FTL_JIT)
        if (FTL::canCompile(dfg) == FTL::CannotCompile) {
            finalizer = adoptPtr(new FailedFinalizer(*this));
            return FailPath;
        }
        
        performCriticalEdgeBreaking(dfg);
        performLoopPreHeaderCreation(dfg);
        performCPSRethreading(dfg);
        performSSAConversion(dfg);
        performLivenessAnalysis(dfg);
        performCFA(dfg);
        performLICM(dfg);
        performLivenessAnalysis(dfg);
        performCFA(dfg);
        performDCE(dfg); // We rely on this to convert dead SetLocals into the appropriate hint, and to kill dead code that won't be recognized as dead by LLVM.
        performStackLayout(dfg);
        performLivenessAnalysis(dfg);
        performFlushLivenessAnalysis(dfg);
        performOSRAvailabilityAnalysis(dfg);
        
        dumpAndVerifyGraph(dfg, "Graph just before FTL lowering:");
        
        initializeLLVM();
        
        FTL::State state(dfg);
        FTL::lowerDFGToLLVM(state);
        
        if (Options::reportCompileTimes())
            beforeFTL = currentTimeMS();
        
        if (Options::llvmAlwaysFailsBeforeCompile()) {
            FTL::fail(state);
            return FTLPath;
        }
        
        FTL::compile(state);

        if (Options::llvmAlwaysFailsBeforeLink()) {
            FTL::fail(state);
            return FTLPath;
        }
        
        FTL::link(state);
        return FTLPath;
#else
        RELEASE_ASSERT_NOT_REACHED();
        break;
#endif // ENABLE(FTL_JIT)
    }
        
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
    
    performCPSRethreading(dfg);
    performDCE(dfg);
    performStackLayout(dfg);
    performVirtualRegisterAllocation(dfg);
    dumpAndVerifyGraph(dfg, "Graph after optimization:");

    JITCompiler dataFlowJIT(dfg);
    if (codeBlock->codeType() == FunctionCode) {
        dataFlowJIT.compileFunction();
        dataFlowJIT.linkFunction();
    } else {
        dataFlowJIT.compile();
        dataFlowJIT.link();
    }
    
    return DFGPath;
}
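The loop in the middle of compileInThreadImpl is the standard optimization-fixpoint pattern: run each pass, OR the "did anything change" bits together, and stop once a full iteration changes nothing. Here is a library-free sketch of that pattern; the pass names are stand-ins for the perform*() calls above.

#include <cstdio>

struct Graph {
    int foldableConstants = 3; // toy stand-in for real IR state
};

// Each pass returns true if it changed the graph, like the perform*() calls.
static bool performConstantFolding(Graph& graph)
{
    if (!graph.foldableConstants)
        return false;
    --graph.foldableConstants;
    return true;
}

static bool performCFGSimplification(Graph&)
{
    return false; // nothing to do in this toy example
}

static void runToFixpoint(Graph& graph)
{
    for (unsigned count = 1; ; ++count) {
        bool changed = false;
        changed |= performConstantFolding(graph);
        changed |= performCFGSimplification(graph);
        if (!changed) {
            std::printf("fixpoint converged in %u iterations\n", count);
            return;
        }
        // The real loop re-runs CPS rethreading here to restore IR invariants
        // before the next iteration.
    }
}

int main()
{
    Graph graph;
    runToFixpoint(graph);
    return 0;
}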
Example #4
bool doesGC(Graph& graph, Node* node)
{
    if (clobbersHeap(graph, node))
        return true;
    
    // Now consider nodes that don't clobber the world but that still may GC. The switch below
    // covers every node type; by convention we list the world-clobbering nodes (already caught
    // by the early return above) among the "false" cases, but they could go anywhere.
    switch (node->op()) {
    case JSConstant:
    case DoubleConstant:
    case Int52Constant:
    case Identity:
    case GetCallee:
    case GetArgumentCount:
    case GetLocal:
    case SetLocal:
    case MovHint:
    case ZombieHint:
    case Phantom:
    case Upsilon:
    case Phi:
    case Flush:
    case PhantomLocal:
    case GetLocalUnlinked:
    case SetArgument:
    case BitAnd:
    case BitOr:
    case BitXor:
    case BitLShift:
    case BitRShift:
    case BitURShift:
    case ValueToInt32:
    case UInt32ToNumber:
    case DoubleAsInt32:
    case ArithAdd:
    case ArithClz32:
    case ArithSub:
    case ArithNegate:
    case ArithMul:
    case ArithIMul:
    case ArithDiv:
    case ArithMod:
    case ArithAbs:
    case ArithMin:
    case ArithMax:
    case ArithPow:
    case ArithSqrt:
    case ArithFRound:
    case ArithSin:
    case ArithCos:
    case ArithLog:
    case ValueAdd:
    case GetById:
    case GetByIdFlush:
    case PutById:
    case PutByIdFlush:
    case PutByIdDirect:
    case CheckStructure:
    case GetExecutable:
    case GetButterfly:
    case CheckArray:
    case GetScope:
    case SkipScope:
    case GetClosureVar:
    case PutClosureVar:
    case GetGlobalVar:
    case PutGlobalVar:
    case VarInjectionWatchpoint:
    case CheckCell:
    case CheckNotEmpty:
    case RegExpExec:
    case RegExpTest:
    case CompareLess:
    case CompareLessEq:
    case CompareGreater:
    case CompareGreaterEq:
    case CompareEq:
    case CompareEqConstant:
    case CompareStrictEq:
    case Call:
    case Construct:
    case CallVarargs:
    case ConstructVarargs:
    case LoadVarargs:
    case CallForwardVarargs:
    case ConstructForwardVarargs:
    case NativeCall:
    case NativeConstruct:
    case Breakpoint:
    case ProfileWillCall:
    case ProfileDidCall:
    case ProfileType:
    case ProfileControlFlow:
    case CheckHasInstance:
    case InstanceOf:
    case IsUndefined:
    case IsBoolean:
    case IsNumber:
    case IsString:
    case IsObject:
    case IsObjectOrNull:
    case IsFunction:
    case TypeOf:
    case LogicalNot:
    case ToPrimitive:
    case ToString:
    case CallStringConstructor:
    case In:
    case Jump:
    case Branch:
    case Switch:
    case Return:
    case Throw:
    case CountExecution:
    case ForceOSRExit:
    case CheckWatchdogTimer:
    case StringFromCharCode:
    case Unreachable:
    case ExtractOSREntryLocal:
    case CheckTierUpInLoop:
    case CheckTierUpAtReturn:
    case CheckTierUpAndOSREnter:
    case LoopHint:
    case StoreBarrier:
    case StoreBarrierWithNullCheck:
    case InvalidationPoint:
    case NotifyWrite:
    case CheckInBounds:
    case ConstantStoragePointer:
    case Check:
    case MultiGetByOffset:
    case ValueRep:
    case DoubleRep:
    case Int52Rep:
    case GetGetter:
    case GetSetter:
    case GetByVal:
    case GetIndexedPropertyStorage:
    case GetArrayLength:
    case ArrayPush:
    case ArrayPop:
    case StringCharAt:
    case StringCharCodeAt:
    case GetTypedArrayByteOffset:
    case PutByValDirect:
    case PutByVal:
    case PutByValAlias:
    case PutStructure:
    case GetByOffset:
    case GetGetterSetterByOffset:
    case PutByOffset:
    case GetEnumerableLength:
    case HasGenericProperty:
    case HasStructureProperty:
    case HasIndexedProperty:
    case GetDirectPname:
    case FiatInt52:
    case BooleanToNumber:
    case CheckBadCell:
    case BottomValue:
    case PhantomNewObject:
    case PhantomNewFunction:
    case PhantomDirectArguments:
    case PhantomClonedArguments:
    case GetMyArgumentByVal:
    case ForwardVarargs:
    case PutHint:
    case CheckStructureImmediate:
    case PutStack:
    case KillStack:
    case GetStack:
    case GetFromArguments:
    case PutToArguments:
        return false;

    case CreateActivation:
    case CreateDirectArguments:
    case CreateScopedArguments:
    case CreateClonedArguments:
    case ToThis:
    case CreateThis:
    case AllocatePropertyStorage:
    case ReallocatePropertyStorage:
    case Arrayify:
    case ArrayifyToStructure:
    case NewObject:
    case NewArray:
    case NewArrayWithSize:
    case NewArrayBuffer:
    case NewRegexp:
    case NewStringObject:
    case MakeRope:
    case NewFunction:
    case NewTypedArray:
    case ThrowReferenceError:
    case GetPropertyEnumerator:
    case GetEnumeratorStructurePname:
    case GetEnumeratorGenericPname:
    case ToIndexString:
    case MaterializeNewObject:
        return true;
        
    case MultiPutByOffset:
        return node->multiPutByOffsetData().reallocatesStorage();

    case LastNodeType:
        RELEASE_ASSERT_NOT_REACHED();
        return true;
    }
    
    RELEASE_ASSERT_NOT_REACHED();
    return true;
}
Example #5
bool JITCode::contains(void*)
{
    // We have no idea what addresses the FTL code contains, yet.
    RELEASE_ASSERT_NOT_REACHED();
    return false;
}
Example #6
void generate(Code& code, CCallHelpers& jit)
{
    TimingScope timingScope("Air::generate");
    
    // We don't expect the incoming code to have predecessors computed.
    code.resetReachability();
    
    if (shouldValidateIR())
        validate(code);

    // If we're doing super verbose dumping, the phase scope of any phase will already do a dump.
    if (shouldDumpIR() && !shouldDumpIRAtEachPhase()) {
        dataLog("Initial air:\n");
        dataLog(code);
    }

    // This is where we run our optimizations and transformations.
    // FIXME: Add Air optimizations.
    // https://bugs.webkit.org/show_bug.cgi?id=150456
    
    eliminateDeadCode(code);

    // This is where we would have a real register allocator. Then, we could use spillEverything()
    // in place of the register allocator only for testing.
    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=150457
    spillEverything(code);

    // Prior to this point the prologue and epilogue is implicit. This makes it explicit. It also
    // does things like identify which callee-saves we're using and saves them.
    handleCalleeSaves(code);

    // This turns all Stack and CallArg Args into Addr args that use the frame pointer. It does
    // this by first-fit allocating stack slots. It should be pretty darn close to optimal, so we
    // shouldn't have to worry about this very much.
    allocateStack(code);

    // If we coalesced moves then we can unbreak critical edges. This is the main reason for this
    // phase.
    simplifyCFG(code);

    // FIXME: We should really have a code layout optimization here.
    // https://bugs.webkit.org/show_bug.cgi?id=150478

    reportUsedRegisters(code);

    if (shouldValidateIR())
        validate(code);

    // Do a final dump of Air. Note that we have to do this even if we are doing per-phase dumping,
    // since the final generation is not a phase.
    if (shouldDumpIR()) {
        dataLog("Air after ", code.lastPhaseName(), ", before generation:\n");
        dataLog(code);
    }

    TimingScope codeGenTimingScope("Air::generate backend");

    // And now, we generate code.
    jit.emitFunctionPrologue();
    jit.addPtr(CCallHelpers::TrustedImm32(-code.frameSize()), MacroAssembler::stackPointerRegister);

    GenerationContext context;
    context.code = &code;
    IndexMap<BasicBlock, CCallHelpers::Label> blockLabels(code.size());
    IndexMap<BasicBlock, CCallHelpers::JumpList> blockJumps(code.size());

    auto link = [&] (CCallHelpers::Jump jump, BasicBlock* target) {
        if (blockLabels[target].isSet()) {
            jump.linkTo(blockLabels[target], &jit);
            return;
        }

        blockJumps[target].append(jump);
    };

    for (BasicBlock* block : code) {
        blockJumps[block].link(&jit);
        ASSERT(block->size() >= 1);
        for (unsigned i = 0; i < block->size() - 1; ++i) {
            CCallHelpers::Jump jump = block->at(i).generate(jit, context);
            ASSERT_UNUSED(jump, !jump.isSet());
        }

        if (block->last().opcode == Jump
            && block->successorBlock(0) == code.findNextBlock(block))
            continue;

        if (block->last().opcode == Ret) {
            // We currently don't represent the full prologue/epilogue in Air, so we need to
            // have this override.
            jit.emitFunctionEpilogue();
            jit.ret();
            continue;
        }
        
        CCallHelpers::Jump jump = block->last().generate(jit, context);
        switch (block->numSuccessors()) {
        case 0:
            ASSERT(!jump.isSet());
            break;
        case 1:
            link(jump, block->successorBlock(0));
            break;
        case 2:
            link(jump, block->successorBlock(0));
            if (block->successorBlock(1) != code.findNextBlock(block))
                link(jit.jump(), block->successorBlock(1));
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
    }

    for (auto& latePath : context.latePaths)
        latePath->run(jit, context);
}
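The link lambda and blockJumps table above implement one-pass branch resolution: a jump to an already-emitted block is linked immediately, while a jump to a not-yet-emitted block is queued and patched when that block's label is bound. Here is a standalone sketch of the same bookkeeping, with integer "addresses" standing in for CCallHelpers labels and jumps.

#include <cstdio>
#include <map>
#include <vector>

int main()
{
    std::map<int, int> boundLabels;               // block -> emitted address
    std::map<int, std::vector<int>> pendingJumps; // block -> jump sites awaiting it
    std::vector<int> jumpTarget(16, -1);          // "code": jump site -> resolved address

    // Mirrors the link lambda: resolve backward jumps now, queue forward jumps.
    auto link = [&](int jumpSite, int targetBlock) {
        auto it = boundLabels.find(targetBlock);
        if (it != boundLabels.end()) {
            jumpTarget[jumpSite] = it->second;
            return;
        }
        pendingJumps[targetBlock].push_back(jumpSite);
    };

    // Mirrors binding a block's label at the top of the emission loop.
    auto bind = [&](int block, int address) {
        boundLabels[block] = address;
        for (int site : pendingJumps[block])
            jumpTarget[site] = address;
        pendingJumps[block].clear();
    };

    bind(0, 100);
    link(0, 1);   // forward jump: queued until block 1 is bound
    link(1, 0);   // backward jump: resolved immediately
    bind(1, 200); // patches jump site 0
    std::printf("site 0 -> %d, site 1 -> %d\n", jumpTarget[0], jumpTarget[1]);
    return 0;
}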
Example #7
Ref<Node> ShadowRoot::cloneNodeInternal(Document&, CloningOperation)
{
    RELEASE_ASSERT_NOT_REACHED();
    return *static_cast<Node*>(nullptr); // ShadowRoots should never be cloned.
}
Example #8
RefPtr<WebCore::IDBRequest> IDBObjectStore::openCursor(ScriptExecutionContext*, IDBKeyRange*, ExceptionCode&)
{
    RELEASE_ASSERT_NOT_REACHED();
}
Example #9
void JSCell::putDirectVirtual(JSObject*, ExecState*, PropertyName, JSValue, unsigned)
{
    RELEASE_ASSERT_NOT_REACHED();
}
Example #10
bool JSCell::customHasInstance(JSObject*, ExecState*, JSValue)
{
    RELEASE_ASSERT_NOT_REACHED();
    return false;
}
Example #11
void JSCell::getPropertyNames(JSObject*, ExecState*, PropertyNameArray&, EnumerationMode)
{
    RELEASE_ASSERT_NOT_REACHED();
}
Example #12
String JSCell::className(const JSObject*)
{
    RELEASE_ASSERT_NOT_REACHED();
    return String();
}
Example #13
bool JSCell::getOwnPropertySlotByIndex(JSObject*, ExecState*, unsigned, PropertySlot&)
{
    RELEASE_ASSERT_NOT_REACHED();
    return false;
}
Example #14
JSValue JSCell::defaultValue(const JSObject*, ExecState*, PreferredPrimitiveType)
{
    RELEASE_ASSERT_NOT_REACHED();
    return jsUndefined();
}
Example #15
GetByIdStatus GetByIdStatus::computeForStubInfo(
    const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, StructureStubInfo* stubInfo, StringImpl* uid,
    CallLinkStatus::ExitSiteData callExitSiteData)
{
    if (!stubInfo || !stubInfo->seen)
        return GetByIdStatus(NoInformation);
    
    PolymorphicGetByIdList* list = 0;
    State slowPathState = TakesSlowPath;
    if (stubInfo->accessType == access_get_by_id_list) {
        list = stubInfo->u.getByIdList.list;
        for (unsigned i = 0; i < list->size(); ++i) {
            const GetByIdAccess& access = list->at(i);
            if (access.doesCalls())
                slowPathState = MakesCalls;
        }
    }
    
    // Finally figure out if we can derive an access strategy.
    GetByIdStatus result;
    result.m_state = Simple;
    result.m_wasSeenInJIT = true; // This is interesting for bytecode dumping only.
    switch (stubInfo->accessType) {
    case access_unset:
        return GetByIdStatus(NoInformation);
        
    case access_get_by_id_self: {
        Structure* structure = stubInfo->u.getByIdSelf.baseObjectStructure.get();
        if (structure->takesSlowPathInDFGForImpureProperty())
            return GetByIdStatus(slowPathState, true);
        unsigned attributesIgnored;
        GetByIdVariant variant;
        variant.m_offset = structure->getConcurrently(uid, attributesIgnored);
        if (!isValidOffset(variant.m_offset))
            return GetByIdStatus(slowPathState, true);
        
        variant.m_structureSet.add(structure);
        bool didAppend = result.appendVariant(variant);
        ASSERT_UNUSED(didAppend, didAppend);
        return result;
    }
        
    case access_get_by_id_list: {
        for (unsigned listIndex = 0; listIndex < list->size(); ++listIndex) {
            Structure* structure = list->at(listIndex).structure();
            
            ComplexGetStatus complexGetStatus = ComplexGetStatus::computeFor(
                profiledBlock, structure, list->at(listIndex).chain(),
                list->at(listIndex).chainCount(), uid);
             
            switch (complexGetStatus.kind()) {
            case ComplexGetStatus::ShouldSkip:
                continue;
                 
            case ComplexGetStatus::TakesSlowPath:
                return GetByIdStatus(slowPathState, true);
                 
            case ComplexGetStatus::Inlineable: {
                std::unique_ptr<CallLinkStatus> callLinkStatus;
                switch (list->at(listIndex).type()) {
                case GetByIdAccess::SimpleInline:
                case GetByIdAccess::SimpleStub: {
                    break;
                }
                case GetByIdAccess::Getter: {
                    AccessorCallJITStubRoutine* stub = static_cast<AccessorCallJITStubRoutine*>(
                        list->at(listIndex).stubRoutine());
                    callLinkStatus = std::make_unique<CallLinkStatus>(
                        CallLinkStatus::computeFor(
                            locker, profiledBlock, *stub->m_callLinkInfo, callExitSiteData));
                    break;
                }
                case GetByIdAccess::CustomGetter:
                case GetByIdAccess::WatchedStub: {
                    // FIXME: It would be totally sweet to support this at some point in the future.
                    // https://bugs.webkit.org/show_bug.cgi?id=133052
                    return GetByIdStatus(slowPathState, true);
                }
                default:
                    RELEASE_ASSERT_NOT_REACHED();
                }
                 
                GetByIdVariant variant(
                    StructureSet(structure), complexGetStatus.offset(), complexGetStatus.chain(),
                    std::move(callLinkStatus));
                 
                if (!result.appendVariant(variant))
                    return GetByIdStatus(slowPathState, true);
                break;
            } }
        }
        
        return result;
    }
        
    default:
        return GetByIdStatus(slowPathState, true);
    }
    
    RELEASE_ASSERT_NOT_REACHED();
    return GetByIdStatus();
}
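The list-walking logic above reduces a polymorphic inline cache to a single summary, degrading from Simple as soon as any case demands it. Here is a condensed model of that summarization, a sketch with stand-in types rather than the JSC data structures.

#include <cstdio>
#include <vector>

enum class AccessKind { SelfLoad, Getter, CustomGetter };
enum class Summary { Simple, MakesCalls, TakesSlowPath };

static Summary summarize(const std::vector<AccessKind>& list)
{
    Summary result = Summary::Simple;
    for (AccessKind access : list) {
        if (access == AccessKind::CustomGetter)
            return Summary::TakesSlowPath; // unsupported case: give up entirely
        if (access == AccessKind::Getter)
            result = Summary::MakesCalls;  // still inlineable, but it calls out
    }
    return result;
}

int main()
{
    Summary summary = summarize({ AccessKind::SelfLoad, AccessKind::Getter });
    std::printf("summary = %d\n", static_cast<int>(summary)); // MakesCalls
    return 0;
}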
Example #16
void IDBObjectStore::deleteIndex(const String&, ExceptionCode&)
{
    RELEASE_ASSERT_NOT_REACHED();
}
Example #17
RefPtr<DOMStringList> IDBObjectStore::indexNames() const
{
    RELEASE_ASSERT_NOT_REACHED();
}
Example #18
bool JSCell::getOwnPropertyDescriptor(JSObject*, ExecState*, PropertyName, PropertyDescriptor&)
{
    RELEASE_ASSERT_NOT_REACHED();
    return false;
}
Example #19
 void handleBlock(BlockIndex blockIndex)
 {
     BasicBlock* block = m_graph.block(blockIndex);
     if (!block)
         return;
     
     m_map.clear();
     
     // First we collect Ranges. If operations within the range have enough redundancy,
     // we hoist. And then we remove additions and checks that fall within the max range.
     
     for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
         Node* node = block->at(nodeIndex);
         RangeKeyAndAddend data = rangeKeyAndAddend(node);
         if (verbose)
             dataLog("For ", node, ": ", data, "\n");
         if (!data)
             continue;
         
         Range& range = m_map[data.m_key];
         if (verbose)
             dataLog("    Range: ", range, "\n");
         if (range.m_count) {
             if (data.m_addend > range.m_maxBound) {
                 range.m_maxBound = data.m_addend;
                 range.m_maxOrigin = node->origin.semantic;
             } else if (data.m_addend < range.m_minBound) {
                 range.m_minBound = data.m_addend;
                 range.m_minOrigin = node->origin.semantic;
             }
         } else {
             range.m_maxBound = data.m_addend;
             range.m_minBound = data.m_addend;
             range.m_minOrigin = node->origin.semantic;
             range.m_maxOrigin = node->origin.semantic;
         }
         range.m_count++;
         if (verbose)
             dataLog("    New range: ", range, "\n");
     }
     
     for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
         Node* node = block->at(nodeIndex);
         RangeKeyAndAddend data = rangeKeyAndAddend(node);
         if (!data)
             continue;
         Range range = m_map[data.m_key];
         if (!isValid(data.m_key, range))
             continue;
         
         // Do the hoisting.
         if (!range.m_hoisted) {
             switch (data.m_key.m_kind) {
             case Addition: {
                 if (range.m_minBound < 0) {
                     insertMustAdd(
                         nodeIndex, NodeOrigin(range.m_minOrigin, node->origin.forExit),
                         data.m_key.m_source, range.m_minBound);
                 }
                 if (range.m_maxBound > 0) {
                     insertMustAdd(
                         nodeIndex, NodeOrigin(range.m_maxOrigin, node->origin.forExit),
                         data.m_key.m_source, range.m_maxBound);
                 }
                 break;
             }
             
             case ArrayBounds: {
                 Node* minNode;
                 Node* maxNode;
                 
                 if (!data.m_key.m_source) {
                     minNode = 0;
                     maxNode = m_insertionSet.insertConstant(
                         nodeIndex, range.m_maxOrigin, jsNumber(range.m_maxBound));
                 } else {
                     minNode = insertAdd(
                         nodeIndex, NodeOrigin(range.m_minOrigin, node->origin.forExit),
                         data.m_key.m_source, range.m_minBound, Arith::Unchecked);
                     maxNode = insertAdd(
                         nodeIndex, NodeOrigin(range.m_maxOrigin, node->origin.forExit),
                         data.m_key.m_source, range.m_maxBound, Arith::Unchecked);
                 }
                 
                 if (minNode) {
                     m_insertionSet.insertNode(
                         nodeIndex, SpecNone, CheckInBounds, node->origin,
                         Edge(minNode, Int32Use), Edge(data.m_key.m_key, Int32Use));
                 }
                 m_insertionSet.insertNode(
                     nodeIndex, SpecNone, CheckInBounds, node->origin,
                     Edge(maxNode, Int32Use), Edge(data.m_key.m_key, Int32Use));
                 break;
             }
             
             default:
                 RELEASE_ASSERT_NOT_REACHED();
             }
             
             m_changed = true;
             m_map[data.m_key].m_hoisted = true;
         }
         
         // Do the elimination.
         switch (data.m_key.m_kind) {
         case Addition:
             node->setArithMode(Arith::Unchecked);
             m_changed = true;
             break;
             
         case ArrayBounds:
             node->convertToPhantom();
             m_changed = true;
             break;
             
         default:
             RELEASE_ASSERT_NOT_REACHED();
         }
     }
     
     m_insertionSet.execute(block);
 }
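The hoisting in handleBlock pays off because checking the extreme addends of a range subsumes every check in between. Here is a worked instance of that redundancy (a sketch, not DFG code): if one block bounds-checks i, i + 1, and i + 3 against the same array, checking only i (the minimum addend) and i + 3 (the maximum) is enough.

#include <cassert>
#include <cstdio>
#include <vector>

static int sumThree(const std::vector<int>& a, size_t i)
{
    // Hoisted checks for the extreme offsets...
    assert(i < a.size());     // min addend: i + 0
    assert(i + 3 < a.size()); // max addend: i + 3
    // ...subsume the interior check: i + 1 lies within [i, i + 3], so the
    // per-access check for it can be eliminated, as the pass above does by
    // converting the redundant CheckInBounds nodes to Phantom.
    return a[i] + a[i + 1] + a[i + 3];
}

int main()
{
    std::vector<int> v { 1, 2, 3, 4, 5 };
    std::printf("%d\n", sumThree(v, 1)); // prints 10
    return 0;
}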
Example #20
    void fixupBlock(BasicBlock* block)
    {
        if (!block)
            return;
        
        switch (m_graph.m_form) {
        case SSA:
            break;
            
        case ThreadedCPS: {
            // Clean up variable links for the block. We need to do this before the actual DCE
            // because we need to see GetLocals, so we can bypass them in situations where the
            // vars-at-tail point to a GetLocal, the GetLocal is dead, but the Phi it points
            // to is alive.
            
            for (unsigned phiIndex = 0; phiIndex < block->phis.size(); ++phiIndex) {
                if (!block->phis[phiIndex]->shouldGenerate()) {
                    // FIXME: We could actually free nodes here. Except that it probably
                    // doesn't matter, since we don't add any nodes after this phase.
                    // https://bugs.webkit.org/show_bug.cgi?id=126239
                    block->phis[phiIndex--] = block->phis.last();
                    block->phis.removeLast();
                }
            }
            
            cleanVariables(block->variablesAtHead);
            cleanVariables(block->variablesAtTail);
            break;
        }
            
        default:
            RELEASE_ASSERT_NOT_REACHED();
            return;
        }

        for (unsigned indexInBlock = block->size(); indexInBlock--;) {
            Node* node = block->at(indexInBlock);
            if (node->shouldGenerate())
                continue;
                
            switch (node->op()) {
            case MovHint: {
                ASSERT(node->child1().useKind() == UntypedUse);
                if (!node->child1()->shouldGenerate()) {
                    node->setOpAndDefaultFlags(ZombieHint);
                    node->child1() = Edge();
                    break;
                }
                node->setOpAndDefaultFlags(MovHint);
                break;
            }
                
            case ZombieHint: {
                // Currently we assume that DCE runs only once.
                RELEASE_ASSERT_NOT_REACHED();
                break;
            }
            
            default: {
                if (node->flags() & NodeHasVarArgs) {
                    for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
                        Edge edge = m_graph.m_varArgChildren[childIdx];

                        if (!edge || edge.willNotHaveCheck())
                            continue;

                        m_insertionSet.insertNode(indexInBlock, SpecNone, Phantom, node->origin, edge);
                    }

                    node->convertToPhantomUnchecked();
                    node->children.reset();
                    node->setRefCount(1);
                    break;
                }

                node->convertToPhantom();
                eliminateIrrelevantPhantomChildren(node);
                node->setRefCount(1);
                break;
            } }
        }

        m_insertionSet.execute(block);
    }
Example #21
    void propagate(Node* node)
    {
        NodeFlags flags = node->flags() & NodeBytecodeBackPropMask;

        switch (node->op()) {
        case GetLocal: {
            VariableAccessData* variableAccessData = node->variableAccessData();
            flags &= ~NodeBytecodeUsesAsInt; // We don't care about cross-block uses-as-int.
            m_changed |= variableAccessData->mergeFlags(flags);
            break;
        }

        case SetLocal: {
            VariableAccessData* variableAccessData = node->variableAccessData();
            if (!variableAccessData->isLoadedFrom())
                break;
            flags = variableAccessData->flags();
            RELEASE_ASSERT(!(flags & ~NodeBytecodeBackPropMask));
            flags |= NodeBytecodeUsesAsNumber; // Account for the fact that control flow may cause overflows that our modeling can't handle.
            node->child1()->mergeFlags(flags);
            break;
        }

        case Flush: {
            VariableAccessData* variableAccessData = node->variableAccessData();
            m_changed |= variableAccessData->mergeFlags(NodeBytecodeUsesAsValue);
            break;
        }

        case MovHint:
        case Check:
            break;

        case BitAnd:
        case BitOr:
        case BitXor:
        case BitRShift:
        case BitLShift:
        case BitURShift:
        case ArithIMul: {
            flags |= NodeBytecodeUsesAsInt;
            flags &= ~(NodeBytecodeUsesAsNumber | NodeBytecodeNeedsNegZero | NodeBytecodeUsesAsOther);
            flags &= ~NodeBytecodeUsesAsArrayIndex;
            node->child1()->mergeFlags(flags);
            node->child2()->mergeFlags(flags);
            break;
        }

        case StringCharCodeAt: {
            node->child1()->mergeFlags(NodeBytecodeUsesAsValue);
            node->child2()->mergeFlags(NodeBytecodeUsesAsValue | NodeBytecodeUsesAsInt | NodeBytecodeUsesAsArrayIndex);
            break;
        }

        case UInt32ToNumber: {
            node->child1()->mergeFlags(flags);
            break;
        }

        case ValueAdd: {
            if (isNotNegZero(node->child1().node()) || isNotNegZero(node->child2().node()))
                flags &= ~NodeBytecodeNeedsNegZero;
            if (node->child1()->hasNumberResult() || node->child2()->hasNumberResult())
                flags &= ~NodeBytecodeUsesAsOther;
            if (!isWithinPowerOfTwo<32>(node->child1()) && !isWithinPowerOfTwo<32>(node->child2()))
                flags |= NodeBytecodeUsesAsNumber;
            if (!m_allowNestedOverflowingAdditions)
                flags |= NodeBytecodeUsesAsNumber;

            node->child1()->mergeFlags(flags);
            node->child2()->mergeFlags(flags);
            break;
        }

        case ArithAdd: {
            flags &= ~NodeBytecodeUsesAsOther;
            if (isNotNegZero(node->child1().node()) || isNotNegZero(node->child2().node()))
                flags &= ~NodeBytecodeNeedsNegZero;
            if (!isWithinPowerOfTwo<32>(node->child1()) && !isWithinPowerOfTwo<32>(node->child2()))
                flags |= NodeBytecodeUsesAsNumber;
            if (!m_allowNestedOverflowingAdditions)
                flags |= NodeBytecodeUsesAsNumber;

            node->child1()->mergeFlags(flags);
            node->child2()->mergeFlags(flags);
            break;
        }

        case ArithClz32: {
            flags &= ~(NodeBytecodeUsesAsNumber | NodeBytecodeNeedsNegZero | NodeBytecodeUsesAsOther | NodeBytecodeUsesAsArrayIndex);
            flags |= NodeBytecodeUsesAsInt;
            node->child1()->mergeFlags(flags);
            break;
        }

        case ArithSub: {
            flags &= ~NodeBytecodeUsesAsOther;
            if (isNotNegZero(node->child1().node()) || isNotPosZero(node->child2().node()))
                flags &= ~NodeBytecodeNeedsNegZero;
            if (!isWithinPowerOfTwo<32>(node->child1()) && !isWithinPowerOfTwo<32>(node->child2()))
                flags |= NodeBytecodeUsesAsNumber;
            if (!m_allowNestedOverflowingAdditions)
                flags |= NodeBytecodeUsesAsNumber;

            node->child1()->mergeFlags(flags);
            node->child2()->mergeFlags(flags);
            break;
        }

        case ArithNegate: {
            flags &= ~NodeBytecodeUsesAsOther;

            node->child1()->mergeFlags(flags);
            break;
        }

        case ArithMul: {
            // As soon as a multiply happens, we can easily end up in the part
            // of the double domain where the point at which you do truncation
            // can change the outcome. So, ArithMul always forces its inputs to
            // check for overflow. Additionally, it will have to check for overflow
            // itself unless we can prove that there is no way for the values
            // produced to cause double rounding.
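            // (If both operands have magnitude below 2^22, the product has
            // magnitude below 2^44, which fits exactly in a double's 53-bit
            // mantissa, so the multiplication itself cannot round.)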

            if (!isWithinPowerOfTwo<22>(node->child1().node())
                && !isWithinPowerOfTwo<22>(node->child2().node()))
                flags |= NodeBytecodeUsesAsNumber;

            node->mergeFlags(flags);

            flags |= NodeBytecodeUsesAsNumber | NodeBytecodeNeedsNegZero;
            flags &= ~NodeBytecodeUsesAsOther;

            node->child1()->mergeFlags(flags);
            node->child2()->mergeFlags(flags);
            break;
        }

        case ArithDiv: {
            flags |= NodeBytecodeUsesAsNumber | NodeBytecodeNeedsNegZero;
            flags &= ~NodeBytecodeUsesAsOther;

            node->child1()->mergeFlags(flags);
            node->child2()->mergeFlags(flags);
            break;
        }

        case ArithMod: {
            flags |= NodeBytecodeUsesAsNumber;
            flags &= ~NodeBytecodeUsesAsOther;

            node->child1()->mergeFlags(flags);
            node->child2()->mergeFlags(flags & ~NodeBytecodeNeedsNegZero);
            break;
        }

        case GetByVal: {
            node->child1()->mergeFlags(NodeBytecodeUsesAsValue);
            node->child2()->mergeFlags(NodeBytecodeUsesAsNumber | NodeBytecodeUsesAsOther | NodeBytecodeUsesAsInt | NodeBytecodeUsesAsArrayIndex);
            break;
        }

        case NewArrayWithSize: {
            node->child1()->mergeFlags(NodeBytecodeUsesAsValue | NodeBytecodeUsesAsInt | NodeBytecodeUsesAsArrayIndex);
            break;
        }

        case NewTypedArray: {
            // Negative zero is not observable. NaN versus undefined are only observable
            // in that you would get a different exception message. So, like, whatever: we
            // claim here that NaN v. undefined is observable.
            node->child1()->mergeFlags(NodeBytecodeUsesAsInt | NodeBytecodeUsesAsNumber | NodeBytecodeUsesAsOther | NodeBytecodeUsesAsArrayIndex);
            break;
        }

        case StringCharAt: {
            node->child1()->mergeFlags(NodeBytecodeUsesAsValue);
            node->child2()->mergeFlags(NodeBytecodeUsesAsValue | NodeBytecodeUsesAsInt | NodeBytecodeUsesAsArrayIndex);
            break;
        }

        case ToString:
        case CallStringConstructor: {
            node->child1()->mergeFlags(NodeBytecodeUsesAsNumber | NodeBytecodeUsesAsOther);
            break;
        }

        case ToPrimitive:
        case ToNumber: {
            node->child1()->mergeFlags(flags);
            break;
        }

        case PutByValDirect:
        case PutByVal: {
            m_graph.varArgChild(node, 0)->mergeFlags(NodeBytecodeUsesAsValue);
            m_graph.varArgChild(node, 1)->mergeFlags(NodeBytecodeUsesAsNumber | NodeBytecodeUsesAsOther | NodeBytecodeUsesAsInt | NodeBytecodeUsesAsArrayIndex);
            m_graph.varArgChild(node, 2)->mergeFlags(NodeBytecodeUsesAsValue);
            break;
        }

        case Switch: {
            SwitchData* data = node->switchData();
            switch (data->kind) {
            case SwitchImm:
                // We don't need NodeBytecodeNeedsNegZero because if the cases are all integers
                // then -0 and 0 are treated the same.  We don't need NodeBytecodeUsesAsOther
                // because if all of the cases are integers then NaN and undefined are
                // treated the same (i.e. they will take default).
                node->child1()->mergeFlags(NodeBytecodeUsesAsNumber | NodeBytecodeUsesAsInt);
                break;
            case SwitchChar: {
                // We don't need NodeBytecodeNeedsNegZero because if the cases are all strings
                // then -0 and 0 are treated the same.  We don't need NodeBytecodeUsesAsOther
                // because if all of the cases are single-character strings then NaN
                // and undefined are treated the same (i.e. they will take default).
                node->child1()->mergeFlags(NodeBytecodeUsesAsNumber);
                break;
            }
            case SwitchString:
                // We don't need NodeBytecodeNeedsNegZero because if the cases are all strings
                // then -0 and 0 are treated the same.
                node->child1()->mergeFlags(NodeBytecodeUsesAsNumber | NodeBytecodeUsesAsOther);
                break;
            case SwitchCell:
                // There is currently no point to being clever here since this is used for switching
                // on objects.
                mergeDefaultFlags(node);
                break;
            }
            break;
        }

        case Identity:
            // This would be trivial to handle but we just assert that we cannot see these yet.
            RELEASE_ASSERT_NOT_REACHED();
            break;

        // Note: ArithSqrt, ArithUnary and other math intrinsics don't have special
        // rules in here because they are always followed by Phantoms to signify that if the
        // method call speculation fails, the bytecode may use the arguments in arbitrary ways.
        // This corresponds to that possibility of someone doing something like:
        // Math.sin = function(x) { doArbitraryThingsTo(x); }

        default:
            mergeDefaultFlags(node);
            break;
        }
    }
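One concrete payoff of this backward flag propagation: in code shaped like (a + b) | 0, the bitwise-or strips NodeBytecodeUsesAsNumber from the flags it pushes into the add, so the add is free to overflow unobserved and can stay an int32 operation. Here is a standalone sketch of that flag algebra; the flag values are stand-ins.

#include <cstdio>

enum : unsigned {
    UsesAsNumber = 1u << 0, // overflow into doubles is observable
    NeedsNegZero = 1u << 1,
    UsesAsOther  = 1u << 2,
    UsesAsInt    = 1u << 3,
};

int main()
{
    // Whatever the consumer of (a + b) | 0 wants from the BitOr's result:
    unsigned bitOrResultFlags = UsesAsNumber | UsesAsOther;

    // What the BitOr case above pushes into its children, including the add:
    unsigned childFlags = bitOrResultFlags;
    childFlags |= UsesAsInt;
    childFlags &= ~(UsesAsNumber | NeedsNegZero | UsesAsOther);

    std::printf("add needs overflow check: %s\n",
        (childFlags & UsesAsNumber) ? "yes" : "no"); // prints "no"
    return 0;
}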
Example #22
FTL::ForOSREntryJITCode* JITCode::ftlForOSREntry()
{
    RELEASE_ASSERT_NOT_REACHED();
    return 0;
}
Example #23
unsigned JITCode::offsetOf(void*)
{
    // We currently don't have visibility into the FTL code.
    RELEASE_ASSERT_NOT_REACHED();
    return 0;
}
Example #24
DFG::CommonData* JITCode::dfgCommon()
{
    RELEASE_ASSERT_NOT_REACHED();
    return 0;
}
Example #25
    bool run()
    {
        RELEASE_ASSERT(m_graph.m_plan.mode == DFGMode);

        if (!Options::useFTLJIT())
            return false;

        if (m_graph.m_profiledBlock->m_didFailFTLCompilation)
            return false;

#if ENABLE(FTL_JIT)
        FTL::CapabilityLevel level = FTL::canCompile(m_graph);
        if (level == FTL::CannotCompile)
            return false;

        if (!Options::enableOSREntryToFTL())
            level = FTL::CanCompile;

        InsertionSet insertionSet(m_graph);
        for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
            BasicBlock* block = m_graph.block(blockIndex);
            if (!block)
                continue;

            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);
                if (node->op() != LoopHint)
                    continue;

                // We only put OSR checks for the first LoopHint in the block. Note that
                // more than one LoopHint could happen in cases where we did a lot of CFG
                // simplification in the bytecode parser, but it should be very rare.

                NodeOrigin origin = node->origin;

                if (level != FTL::CanCompileAndOSREnter || origin.semantic.inlineCallFrame) {
                    insertionSet.insertNode(
                        nodeIndex + 1, SpecNone, CheckTierUpInLoop, origin);
                    break;
                }

                bool isAtTop = true;
                for (unsigned subNodeIndex = nodeIndex; subNodeIndex--;) {
                    if (!block->at(subNodeIndex)->isSemanticallySkippable()) {
                        isAtTop = false;
                        break;
                    }
                }

                if (!isAtTop) {
                    insertionSet.insertNode(
                        nodeIndex + 1, SpecNone, CheckTierUpInLoop, origin);
                    break;
                }

                insertionSet.insertNode(
                    nodeIndex + 1, SpecNone, CheckTierUpAndOSREnter, origin);
                break;
            }

            NodeAndIndex terminal = block->findTerminal();
            if (terminal.node->op() == Return) {
                insertionSet.insertNode(
                    terminal.index, SpecNone, CheckTierUpAtReturn, terminal.node->origin);
            }

            insertionSet.execute(block);
        }

        m_graph.m_plan.willTryToTierUp = true;
        return true;
#else // ENABLE(FTL_JIT)
        RELEASE_ASSERT_NOT_REACHED();
        return false;
#endif // ENABLE(FTL_JIT)
    }
Example #26
DFG::JITCode* JITCode::dfg()
{
    RELEASE_ASSERT_NOT_REACHED();
    return 0;
}
Example #27
SUPPRESS_ASAN
void* prepareOSREntry(
    ExecState* exec, CodeBlock* dfgCodeBlock, CodeBlock* entryCodeBlock,
    unsigned bytecodeIndex, unsigned streamIndex)
{
    VM& vm = exec->vm();
    CodeBlock* baseline = dfgCodeBlock->baselineVersion();
    ExecutableBase* executable = dfgCodeBlock->ownerExecutable();
    DFG::JITCode* dfgCode = dfgCodeBlock->jitCode()->dfg();
    ForOSREntryJITCode* entryCode = entryCodeBlock->jitCode()->ftlForOSREntry();
    
    if (Options::verboseOSR()) {
        dataLog(
            "FTL OSR from ", *dfgCodeBlock, " to ", *entryCodeBlock, " at bc#",
            bytecodeIndex, ".\n");
    }
    
    if (bytecodeIndex)
        jsCast<ScriptExecutable*>(executable)->setDidTryToEnterInLoop(true);

    if (bytecodeIndex != entryCode->bytecodeIndex()) {
        if (Options::verboseOSR())
            dataLog("    OSR failed because we don't have an entrypoint for bc#", bytecodeIndex, "; ours is for bc#", entryCode->bytecodeIndex(), "\n");
        return 0;
    }
    
    Operands<JSValue> values;
    dfgCode->reconstruct(
        exec, dfgCodeBlock, CodeOrigin(bytecodeIndex), streamIndex, values);
    
    if (Options::verboseOSR())
        dataLog("    Values at entry: ", values, "\n");
    
    for (int argument = values.numberOfArguments(); argument--;) {
        JSValue valueOnStack = exec->r(virtualRegisterForArgument(argument).offset()).asanUnsafeJSValue();
        JSValue reconstructedValue = values.argument(argument);
        if (valueOnStack == reconstructedValue || !argument)
            continue;
        dataLog("Mismatch between reconstructed values and the the value on the stack for argument arg", argument, " for ", *entryCodeBlock, " at bc#", bytecodeIndex, ":\n");
        dataLog("    Value on stack: ", valueOnStack, "\n");
        dataLog("    Reconstructed value: ", reconstructedValue, "\n");
        RELEASE_ASSERT_NOT_REACHED();
    }
    
    RELEASE_ASSERT(
        static_cast<int>(values.numberOfLocals()) == baseline->m_numCalleeRegisters);
    
    EncodedJSValue* scratch = static_cast<EncodedJSValue*>(
        entryCode->entryBuffer()->dataBuffer());
    
    for (int local = values.numberOfLocals(); local--;)
        scratch[local] = JSValue::encode(values.local(local));
    
    int stackFrameSize = entryCode->common.requiredRegisterCountForExecutionAndExit();
    if (!vm.interpreter->stack().ensureCapacityFor(&exec->registers()[virtualRegisterForLocal(stackFrameSize - 1).offset()])) {
        if (Options::verboseOSR())
            dataLog("    OSR failed because stack growth failed.\n");
        return 0;
    }
    
    exec->setCodeBlock(entryCodeBlock);
    
    void* result = entryCode->addressForCall(
        vm, executable, ArityCheckNotRequired,
        RegisterPreservationNotRequired).executableAddress();
    if (Options::verboseOSR())
        dataLog("    Entry will succeed, going to address", RawPointer(result), "\n");
    
    return result;
}
Example #28
FTL::JITCode* JITCode::ftl()
{
    RELEASE_ASSERT_NOT_REACHED();
    return 0;
}
Example #29
char* JIT_OPERATION triggerOSREntryNow(
    ExecState* exec, int32_t bytecodeIndex, int32_t streamIndex)
{
    VM* vm = &exec->vm();
    NativeCallFrameTracer tracer(vm, exec);
    DeferGC deferGC(vm->heap);
    CodeBlock* codeBlock = exec->codeBlock();
    
    if (codeBlock->jitType() != JITCode::DFGJIT) {
        dataLog("Unexpected code block in DFG->FTL tier-up: ", *codeBlock, "\n");
        RELEASE_ASSERT_NOT_REACHED();
    }
    
    JITCode* jitCode = codeBlock->jitCode()->dfg();
    
    if (Options::verboseOSR()) {
        dataLog(
            *codeBlock, ": Entered triggerOSREntryNow with executeCounter = ",
            jitCode->tierUpCounter, "\n");
    }
    
    // - If we don't have an FTL code block, then try to compile one.
    // - If we do have an FTL code block, then try to enter for a while.
    // - If we couldn't enter for a while, then trigger OSR entry.
    
    triggerFTLReplacementCompile(vm, codeBlock, jitCode);

    if (!codeBlock->hasOptimizedReplacement())
        return 0;
    
    if (jitCode->osrEntryRetry < Options::ftlOSREntryRetryThreshold()) {
        jitCode->osrEntryRetry++;
        return 0;
    }
    
    // It's time to try to compile code for OSR entry.
    Worklist::State worklistState;
    if (Worklist* worklist = existingGlobalFTLWorklistOrNull()) {
        worklistState = worklist->completeAllReadyPlansForVM(
            *vm, CompilationKey(codeBlock->baselineVersion(), FTLForOSREntryMode));
    } else
        worklistState = Worklist::NotKnown;
    
    if (worklistState == Worklist::Compiling)
        return 0;
    
    if (CodeBlock* entryBlock = jitCode->osrEntryBlock.get()) {
        void* address = FTL::prepareOSREntry(
            exec, codeBlock, entryBlock, bytecodeIndex, streamIndex);
        if (address)
            return static_cast<char*>(address);
        
        FTL::ForOSREntryJITCode* entryCode = entryBlock->jitCode()->ftlForOSREntry();
        entryCode->countEntryFailure();
        if (entryCode->entryFailureCount() <
            Options::ftlOSREntryFailureCountForReoptimization())
            return 0;
        
        // OSR entry failed. Oh no! This implies that we need to retry. We retry
        // without exponential backoff and we only do this for the entry code block.
        jitCode->osrEntryBlock.clear();
        jitCode->osrEntryRetry = 0;
        return 0;
    }
    
    if (worklistState == Worklist::Compiled) {
        // This means that compilation failed and we already set the thresholds.
        if (Options::verboseOSR())
            dataLog("Code block ", *codeBlock, " was compiled but it doesn't have an optimized replacement.\n");
        return 0;
    }

    // We aren't compiling and haven't compiled anything for OSR entry. So, try to compile
    // something.
    Operands<JSValue> mustHandleValues;
    jitCode->reconstruct(
        exec, codeBlock, CodeOrigin(bytecodeIndex), streamIndex, mustHandleValues);
    RefPtr<CodeBlock> replacementCodeBlock = codeBlock->newReplacement();
    CompilationResult forEntryResult = compile(
        *vm, replacementCodeBlock.get(), codeBlock, FTLForOSREntryMode, bytecodeIndex,
        mustHandleValues, ToFTLForOSREntryDeferredCompilationCallback::create(codeBlock));
    
    if (forEntryResult != CompilationSuccessful) {
        ASSERT(forEntryResult == CompilationDeferred || replacementCodeBlock->hasOneRef());
        return 0;
    }

    // It's possible that the for-entry compile already succeeded. In that case OSR
    // entry will succeed unless we ran out of stack. It's not clear what we should do.
    // We signal to try again after a while if that happens.
    void* address = FTL::prepareOSREntry(
        exec, codeBlock, jitCode->osrEntryBlock.get(), bytecodeIndex, streamIndex);
    return static_cast<char*>(address);
}
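triggerOSREntryNow is effectively a small retry state machine: wait for the replacement compile, spin for a while trying to enter it normally, then invest in an OSR-entry compile, and discard the entry block if entry keeps failing. Below is a standalone sketch of that policy; the names and thresholds are stand-ins, not the JSC options.

#include <cstdio>

enum class Action { Wait, TryEnterExisting, CompileForEntry, DiscardEntryBlock };

struct TierUpState {
    bool hasReplacement = false;
    bool hasEntryBlock = false;
    int osrEntryRetry = 0;
    int entryFailureCount = 0;
};

static const int retryThreshold = 100; // stand-in for Options::ftlOSREntryRetryThreshold()
static const int failureLimit = 15;    // stand-in for the reoptimization failure count

// One trigger firing; reports what the runtime should do next.
static Action tick(TierUpState& s, bool lastEntryFailed)
{
    if (!s.hasReplacement)
        return Action::Wait;            // replacement compile still pending
    if (s.osrEntryRetry < retryThreshold) {
        s.osrEntryRetry++;              // keep hoping to enter the replacement normally
        return Action::Wait;
    }
    if (!s.hasEntryBlock)
        return Action::CompileForEntry; // time to invest in a for-entry compile
    if (lastEntryFailed && ++s.entryFailureCount >= failureLimit) {
        s.hasEntryBlock = false;        // entry keeps failing: start over
        s.osrEntryRetry = 0;
        return Action::DiscardEntryBlock;
    }
    return Action::TryEnterExisting;
}

int main()
{
    TierUpState s;
    s.hasReplacement = true;
    s.osrEntryRetry = retryThreshold;
    std::printf("%d\n", static_cast<int>(tick(s, false))); // CompileForEntry
    return 0;
}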
Example #30
RefPtr<WebCore::IDBRequest> IDBObjectStore::openCursor(ScriptExecutionContext*, const Deprecated::ScriptValue&, const String&, ExceptionCode&)
{
    RELEASE_ASSERT_NOT_REACHED();
}