Example #1
CallLinkStatus CallLinkStatus::computeFromLLInt(const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, unsigned bytecodeIndex)
{
    UNUSED_PARAM(profiledBlock);
    UNUSED_PARAM(bytecodeIndex);
#if ENABLE(LLINT)
#if ENABLE(DFG_JIT)
    if (profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadFunction))) {
        // We could force this to be a closure call, but instead we'll just assume that it
        // takes the slow path.
        return takesSlowPath();
    }
#else
    UNUSED_PARAM(locker);
#endif

    VM& vm = *profiledBlock->vm();

    Instruction* instruction = profiledBlock->instructions().begin() + bytecodeIndex;
    OpcodeID op = vm.interpreter->getOpcodeID(instruction[0].u.opcode);
    if (op != op_call && op != op_construct)
        return CallLinkStatus();

    LLIntCallLinkInfo* callLinkInfo = instruction[5].u.callLinkInfo;

    return CallLinkStatus(callLinkInfo->lastSeenCallee.get());
#else
    return CallLinkStatus();
#endif
}
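
A side note on the fixed-slot indexing above (instruction[0] holds the opcode, instruction[5] the op_call's LLIntCallLinkInfo operand): here is a minimal sketch of the same decoding shape, using hypothetical mini-types rather than JSC's real Instruction union.

// Hypothetical mini-model of a fixed-layout instruction stream; not the real
// JSC Instruction type, just the shape of the indexing assumption.
#include <cstddef>
#include <vector>

struct MiniLinkInfo { const char* lastSeenCallee = nullptr; };

// Each instruction word is a tagged union, as in the LLInt bytecode stream.
union Word {
    int opcode;
    MiniLinkInfo* linkInfo;
    Word(int op) : opcode(op) { }
    Word(MiniLinkInfo* info) : linkInfo(info) { }
};

constexpr int op_call = 1;

// Mirrors instruction[5].u.callLinkInfo above: the link info lives at a fixed
// operand slot relative to the opcode word at bytecodeIndex.
MiniLinkInfo* linkInfoForCall(const std::vector<Word>& stream, std::size_t bytecodeIndex)
{
    const Word* instruction = stream.data() + bytecodeIndex;
    if (instruction[0].opcode != op_call)
        return nullptr;
    return instruction[5].linkInfo;
}

int main()
{
    MiniLinkInfo info { "target" };
    std::vector<Word> stream { Word(op_call), Word(0), Word(0), Word(0), Word(0), Word(&info) };
    return linkInfoForCall(stream, 0) == &info ? 0 : 1;
}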
Example #2
CallLinkStatus CallLinkStatus::computeFor(const ConcurrentJITLocker&, CallLinkInfo& callLinkInfo)
{
    // Note that despite requiring that the locker is held, this code is racy with respect
    // to the CallLinkInfo: it may get cleared while this code runs! This is because
    // CallLinkInfo::unlink() may be called from a different CodeBlock than the one that owns
    // the CallLinkInfo and currently we save space by not having CallLinkInfos know who owns
    // them. So, there is no way for either the caller of CallLinkInfo::unlink() or unlink()
    // itself to figure out which lock to lock.
    //
    // Fortunately, that doesn't matter. The only things we ask of CallLinkInfo - the slow
    // path count, the stub, and the target - can all be asked racily. Stubs and targets can
    // only be deleted at the next GC, so if we load a non-null one, then it must contain data
    // that is still marginally valid (i.e. the pointers aren't stale). This kind of raciness
    // is probably OK for now.

    if (callLinkInfo.slowPathCount >= Options::couldTakeSlowCaseMinimumCount())
        return takesSlowPath();

    if (ClosureCallStubRoutine* stub = callLinkInfo.stub.get())
        return CallLinkStatus(stub->executable(), stub->structure());

    JSFunction* target = callLinkInfo.lastSeenCallee.get();
    if (!target)
        return CallLinkStatus();

    if (callLinkInfo.hasSeenClosure)
        return CallLinkStatus(target->executable(), target->structure());

    return CallLinkStatus(target);
}
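
The discipline the comment describes is: load each racy field exactly once into a local, null-check the local, and only then use it. A minimal sketch of that pattern, assuming a hypothetical atomic field (the real code gets the same effect from WriteBarrier loads plus the stay-alive-until-next-GC guarantee):

#include <atomic>
#include <cstdio>

struct Callee { const char* name; };

// Hypothetical stand-in for CallLinkInfo's racily-read fields.
struct RacyLinkInfo {
    std::atomic<Callee*> lastSeenCallee { nullptr };
    std::atomic<unsigned> slowPathCount { 0 };
};

const char* snapshotCalleeName(RacyLinkInfo& info)
{
    // Load once; a concurrent unlink() may null the field afterwards, but the
    // object a non-null load observed stays alive until the next GC, so the
    // snapshot remains safe to dereference.
    Callee* target = info.lastSeenCallee.load(std::memory_order_relaxed);
    if (!target)
        return "unknown";
    return target->name;
}

int main()
{
    RacyLinkInfo info;
    Callee f { "f" };
    info.lastSeenCallee.store(&f, std::memory_order_relaxed);
    std::printf("%s\n", snapshotCalleeName(info)); // prints "f"
}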
Example #3
void CallLinkStatus::computeDFGStatuses(
    CodeBlock* dfgCodeBlock, CallLinkStatus::ContextMap& map)
{
#if ENABLE(DFG_JIT)
    RELEASE_ASSERT(dfgCodeBlock->jitType() == JITCode::DFGJIT);
    CodeBlock* baselineCodeBlock = dfgCodeBlock->alternative();
    for (auto iter = dfgCodeBlock->callLinkInfosBegin(); !!iter; ++iter) {
        CallLinkInfo& info = **iter;
        CodeOrigin codeOrigin = info.codeOrigin;

        bool takeSlowPath;
        bool badFunction;

        // Check whether we have already made a terrible mistake in the FTL for this
        // code origin. Note that this is approximate because we could have a monovariant
        // inline in the FTL that ended up failing. We should fix that at some point by
        // having data structures to track the context of frequent exits. This is currently
        // challenging because it would require creating a CodeOrigin-based database in
        // baseline CodeBlocks, but those CodeBlocks don't really have a place to put the
        // InlineCallFrames.
        CodeBlock* currentBaseline =
            baselineCodeBlockForOriginAndBaselineCodeBlock(codeOrigin, baselineCodeBlock);
        {
            ConcurrentJITLocker locker(currentBaseline->m_lock);
            takeSlowPath =
                currentBaseline->hasExitSite(locker, DFG::FrequentExitSite(codeOrigin.bytecodeIndex, BadCache, ExitFromFTL))
                || currentBaseline->hasExitSite(locker, DFG::FrequentExitSite(codeOrigin.bytecodeIndex, BadCacheWatchpoint, ExitFromFTL))
                || currentBaseline->hasExitSite(locker, DFG::FrequentExitSite(codeOrigin.bytecodeIndex, BadExecutable, ExitFromFTL));
            badFunction =
                currentBaseline->hasExitSite(locker, DFG::FrequentExitSite(codeOrigin.bytecodeIndex, BadFunction, ExitFromFTL));
        }

        {
            ConcurrentJITLocker locker(dfgCodeBlock->m_lock);
            if (takeSlowPath)
                map.add(info.codeOrigin, takesSlowPath());
            else {
                CallLinkStatus status = computeFor(locker, info);
                if (status.isSet()) {
                    if (badFunction)
                        status.makeClosureCall();
                    map.add(info.codeOrigin, status);
                }
            }
        }
    }
#else
    UNUSED_PARAM(dfgCodeBlock);
#endif // ENABLE(DFG_JIT)

    if (verbose) {
        dataLog("Context map:\n");
        ContextMap::iterator iter = map.begin();
        ContextMap::iterator end = map.end();
        for (; iter != end; ++iter) {
            dataLog("    ", iter->key, ":\n");
            dataLog("        ", iter->value, "\n");
        }
    }
}
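
Note the lock choreography: exit-site data is snapshotted under the baseline block's lock, which is released before the DFG block's lock is taken, so no ordering between per-CodeBlock locks ever needs to be defined. A minimal sketch of that two-phase pattern, with hypothetical mini-types in place of CodeBlock:

#include <map>
#include <mutex>
#include <string>

struct ExitData { bool takesSlowPath = false; bool badFunction = false; };

// Hypothetical stand-in for a CodeBlock that owns its own lock.
struct MiniBlock {
    std::mutex lock;
    std::map<unsigned, ExitData> exitSites; // keyed by bytecode index
};

// First critical section: snapshot exit data under the baseline block's lock.
ExitData snapshotExitData(MiniBlock& baseline, unsigned bytecodeIndex)
{
    std::lock_guard<std::mutex> locker(baseline.lock);
    auto it = baseline.exitSites.find(bytecodeIndex);
    return it == baseline.exitSites.end() ? ExitData() : it->second;
}

// Second critical section: combine the snapshot with the optimized block's
// data under *its* lock. Because the snapshot was taken by value, the
// baseline lock is already released by the time we get here.
void recordStatus(MiniBlock& dfgBlock, unsigned bytecodeIndex, const ExitData& exits,
    std::map<unsigned, std::string>& map)
{
    std::lock_guard<std::mutex> locker(dfgBlock.lock);
    map[bytecodeIndex] = exits.takesSlowPath ? "slow path" : "optimizable";
}

int main()
{
    MiniBlock baseline, dfg;
    baseline.exitSites[7] = { true, false };
    std::map<unsigned, std::string> statuses;
    recordStatus(dfg, 7, snapshotExitData(baseline, 7), statuses);
    return statuses[7] == "slow path" ? 0 : 1;
}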
Example #4
CallLinkStatus CallLinkStatus::computeFor(
    CodeBlock* profiledBlock, unsigned bytecodeIndex, const CallLinkInfoMap& map)
{
    ConcurrentJITLocker locker(profiledBlock->m_lock);

    UNUSED_PARAM(profiledBlock);
    UNUSED_PARAM(bytecodeIndex);
    UNUSED_PARAM(map);
#if ENABLE(DFG_JIT)
    if (profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadCache))
            || profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadCacheWatchpoint))
            || profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadExecutable)))
        return takesSlowPath();

    CallLinkInfo* callLinkInfo = map.get(CodeOrigin(bytecodeIndex));
    if (!callLinkInfo)
        return computeFromLLInt(locker, profiledBlock, bytecodeIndex);

    CallLinkStatus result = computeFor(locker, *callLinkInfo);
    if (!result)
        return computeFromLLInt(locker, profiledBlock, bytecodeIndex);

    if (profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadFunction)))
        result.makeClosureCall();

    return result;
#else
    return CallLinkStatus();
#endif
}
Example #5
ComplexGetStatus ComplexGetStatus::computeFor(
    CodeBlock* profiledBlock, Structure* headStructure, StructureChain* chain,
    unsigned chainCount, AtomicStringImpl* uid)
{
    // FIXME: We should assert that we never see a structure that
    // hasImpureGetOwnPropertySlot() but for which we don't
    // newImpurePropertyFiresWatchpoints(). We're not at a point where we can do
    // that yet.
    // https://bugs.webkit.org/show_bug.cgi?id=131810
    
    if (headStructure->takesSlowPathInDFGForImpureProperty())
        return takesSlowPath();
    
    ComplexGetStatus result;
    result.m_kind = Inlineable;
    
    if (chain && chainCount) {
        result.m_chain = adoptRef(new IntendedStructureChain(
            profiledBlock, headStructure, chain, chainCount));
        
        if (!result.m_chain->isStillValid())
            return skip();
        
        if (headStructure->takesSlowPathInDFGForImpureProperty()
            || result.m_chain->takesSlowPathInDFGForImpureProperty())
            return takesSlowPath();
        
        JSObject* currentObject = result.m_chain->terminalPrototype();
        Structure* currentStructure = result.m_chain->last();
        
        ASSERT_UNUSED(currentObject, currentObject);
        
        result.m_offset = currentStructure->getConcurrently(uid, result.m_attributes);
    } else {
        result.m_offset = headStructure->getConcurrently(uid, result.m_attributes);
    }
    
    if (!isValidOffset(result.m_offset))
        return takesSlowPath();
    
    return result;
}
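
The shape of the lookup: reject impure structures, validate the chain, then read the offset from the structure at the end of the chain (the head structure itself when there is no chain). A hedged sketch of that shape with hypothetical flattened types, not the real Structure API:

#include <string>
#include <unordered_map>
#include <vector>

using Offset = long;
constexpr Offset invalidOffset = -1;

// Hypothetical stand-in for Structure: a property table plus an "impure" flag
// playing the role of takesSlowPathInDFGForImpureProperty().
struct MiniStructure {
    std::unordered_map<std::string, Offset> properties;
    bool impure = false;
};

// chain[0] is the head structure; chain.back() owns the property, mirroring
// result.m_chain->last() above. A one-element chain models the direct
// (chain-free) get in the else branch.
Offset resolveOffset(const std::vector<const MiniStructure*>& chain, const std::string& uid)
{
    for (const MiniStructure* structure : chain) {
        if (structure->impure)
            return invalidOffset; // the real code returns takesSlowPath() here
    }
    const auto& table = chain.back()->properties;
    auto it = table.find(uid);
    return it == table.end() ? invalidOffset : it->second;
}

int main()
{
    MiniStructure head, proto;
    proto.properties["x"] = 2;
    return resolveOffset({ &head, &proto }, "x") == 2 ? 0 : 1;
}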
Example #6
CallLinkStatus CallLinkStatus::computeFor(
    const ConcurrentJITLocker& locker, CallLinkInfo& callLinkInfo, ExitSiteData exitSiteData)
{
    if (exitSiteData.m_takesSlowPath)
        return takesSlowPath();
    
    CallLinkStatus result = computeFor(locker, callLinkInfo);
    if (exitSiteData.m_badFunction)
        result.makeClosureCall();
    
    return result;
}
Example #7
CallLinkStatus CallLinkStatus::computeFor(const ConcurrentJITLocker&, CallLinkInfo& callLinkInfo)
{
    if (callLinkInfo.slowPathCount >= Options::couldTakeSlowCaseMinimumCount())
        return takesSlowPath();
    
    if (callLinkInfo.stub)
        return CallLinkStatus(callLinkInfo.stub->executable(), callLinkInfo.stub->structure());
    
    JSFunction* target = callLinkInfo.lastSeenCallee.get();
    if (!target)
        return CallLinkStatus();
    
    if (callLinkInfo.hasSeenClosure)
        return CallLinkStatus(target->executable(), target->structure());

    return CallLinkStatus(target);
}
Example #8
CallLinkStatus CallLinkStatus::computeFor(
    CodeBlock* profiledBlock, unsigned bytecodeIndex, const CallLinkInfoMap& map)
{
    ConcurrentJITLocker locker(profiledBlock->m_lock);
    
    UNUSED_PARAM(profiledBlock);
    UNUSED_PARAM(bytecodeIndex);
    UNUSED_PARAM(map);
#if ENABLE(DFG_JIT)
    ExitSiteData exitSiteData = computeExitSiteData(locker, profiledBlock, bytecodeIndex);
    if (exitSiteData.m_takesSlowPath)
        return takesSlowPath();
    
    CallLinkInfo* callLinkInfo = map.get(CodeOrigin(bytecodeIndex));
    if (!callLinkInfo)
        return computeFromLLInt(locker, profiledBlock, bytecodeIndex);
    
    return computeFor(locker, *callLinkInfo, exitSiteData);
#else
    return CallLinkStatus();
#endif
}
Example #9
CallLinkStatus CallLinkStatus::computeFromCallLinkInfo(
    const ConcurrentJITLocker&, CallLinkInfo& callLinkInfo)
{
    if (callLinkInfo.clearedByGC())
        return takesSlowPath();
    
    // Note that despite requiring that the locker is held, this code is racy with respect
    // to the CallLinkInfo: it may get cleared while this code runs! This is because
    // CallLinkInfo::unlink() may be called from a different CodeBlock than the one that owns
    // the CallLinkInfo and currently we save space by not having CallLinkInfos know who owns
    // them. So, there is no way for either the caller of CallLinkInfo::unlink() or unlink()
    // itself to figure out which lock to lock.
    //
    // Fortunately, that doesn't matter. The only things we ask of CallLinkInfo - the slow
    // path count, the stub, and the target - can all be asked racily. Stubs and targets can
    // only be deleted at the next GC, so if we load a non-null one, then it must contain data
    // that is still marginally valid (i.e. the pointers aren't stale). This kind of raciness
    // is probably OK for now.
    
    // PolymorphicCallStubRoutine is a GCAwareJITStubRoutine, so if non-null, it will stay alive
    // until next GC even if the CallLinkInfo is concurrently cleared. Also, the variants list is
    // never mutated after the PolymorphicCallStubRoutine is instantiated. We have some conservative
    // fencing in place to make sure that we see the variants list after construction.
    if (PolymorphicCallStubRoutine* stub = callLinkInfo.stub()) {
        WTF::loadLoadFence();
        
        CallEdgeList edges = stub->edges();
        
        // Now that we've loaded the edges list, there are no further concurrency concerns. We will
        // just manipulate and prune this list to our liking - mostly removing entries that are too
        // infrequent and ensuring that it's sorted in descending order of frequency.
        
        RELEASE_ASSERT(edges.size());
        
        std::sort(
            edges.begin(), edges.end(),
            [] (CallEdge a, CallEdge b) {
                return a.count() > b.count();
            });
        RELEASE_ASSERT(edges.first().count() >= edges.last().count());
        
        double totalCallsToKnown = 0;
        double totalCallsToUnknown = callLinkInfo.slowPathCount();
        CallVariantList variants;
        for (size_t i = 0; i < edges.size(); ++i) {
            CallEdge edge = edges[i];
            // If the call is at the tail of the distribution, then we don't optimize it and we
            // treat it as if it were a call to something unknown. We define the tail as being either
            // a call that doesn't belong to the N most frequent callees (N =
            // maxPolymorphicCallVariantsForInlining) or that has a total call count that is too
            // small.
            if (i >= Options::maxPolymorphicCallVariantsForInlining()
                || edge.count() < Options::frequentCallThreshold())
                totalCallsToUnknown += edge.count();
            else {
                totalCallsToKnown += edge.count();
                variants.append(edge.callee());
            }
        }
        
        // Bail if we didn't find any calls that qualified.
        RELEASE_ASSERT(!!totalCallsToKnown == !!variants.size());
        if (variants.isEmpty())
            return takesSlowPath();
        
        // We require that the distribution of callees is skewed towards a handful of common ones.
        if (totalCallsToKnown / totalCallsToUnknown < Options::minimumCallToKnownRate())
            return takesSlowPath();
        
        RELEASE_ASSERT(totalCallsToKnown);
        RELEASE_ASSERT(variants.size());
        
        CallLinkStatus result;
        result.m_variants = variants;
        result.m_couldTakeSlowPath = !!totalCallsToUnknown;
        return result;
    }
    
    CallLinkStatus result;
    
    if (JSFunction* target = callLinkInfo.lastSeenCallee()) {
        CallVariant variant(target);
        if (callLinkInfo.hasSeenClosure())
            variant = variant.despecifiedClosure();
        result.m_variants.append(variant);
    }
    
    result.m_couldTakeSlowPath = !!callLinkInfo.slowPathCount();

    return result;
}
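
The pruning heuristic in the stub branch is self-contained enough to restate: sort edges by observed count, fold the tail of the distribution into the unknown-call total, and bail unless known calls dominate. A minimal sketch with hypothetical plain types; the threshold constants are made-up stand-ins for the real Options::... tunables.

#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <string>
#include <vector>

struct Edge { std::string callee; double count; };

// Hypothetical tunables; the real values come from Options::...().
constexpr std::size_t maxVariantsForInlining = 5;
constexpr double frequentCallThreshold = 2;
constexpr double minimumCallToKnownRate = 0.51;

std::vector<std::string> pruneEdges(std::vector<Edge> edges, double slowPathCount)
{
    // Sort in descending order of observed call count.
    std::sort(edges.begin(), edges.end(),
        [] (const Edge& a, const Edge& b) { return a.count > b.count; });

    double callsToKnown = 0;
    double callsToUnknown = slowPathCount;
    std::vector<std::string> variants;
    for (std::size_t i = 0; i < edges.size(); ++i) {
        // The tail of the distribution - too far down the list, or too rare -
        // is folded into the unknown-call total instead of becoming a variant.
        if (i >= maxVariantsForInlining || edges[i].count < frequentCallThreshold)
            callsToUnknown += edges[i].count;
        else {
            callsToKnown += edges[i].count;
            variants.push_back(edges[i].callee);
        }
    }

    // Bail unless known callees dominate. (As in the code above, a zero
    // unknown total makes the ratio +inf, which trivially passes.)
    if (variants.empty() || callsToKnown / callsToUnknown < minimumCallToKnownRate)
        return { };
    return variants;
}

int main()
{
    std::vector<std::string> variants =
        pruneEdges({ { "f", 100 }, { "g", 40 }, { "h", 1 } }, /* slowPathCount */ 3);
    for (const std::string& name : variants)
        std::printf("%s\n", name.c_str()); // prints "f" then "g"; "h" is in the tail
}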