Example #1
GetByIdStatus GetByIdStatus::computeFor(VM& vm, Structure* structure, Identifier& ident)
{
    // For now we only handle the super simple self access case. We could handle the
    // prototype case in the future.
    
    if (PropertyName(ident).asIndex() != PropertyName::NotAnIndex)
        return GetByIdStatus(TakesSlowPath);
    
    if (structure->typeInfo().overridesGetOwnPropertySlot())
        return GetByIdStatus(TakesSlowPath);
    
    if (!structure->propertyAccessesAreCacheable())
        return GetByIdStatus(TakesSlowPath);
    
    GetByIdStatus result;
    result.m_wasSeenInJIT = false; // To my knowledge nobody that uses computeFor(VM&, Structure*, Identifier&) reads this field, but I might as well be honest: no, it wasn't seen in the JIT, since I computed it statically.
    unsigned attributes;
    JSCell* specificValue;
    result.m_offset = structure->get(vm, ident, attributes, specificValue);
    if (!isValidOffset(result.m_offset))
        return GetByIdStatus(TakesSlowPath); // It's probably a prototype lookup. Give up on life for now, even though we could totally be way smarter about it.
    if (attributes & Accessor)
        return GetByIdStatus(MakesCalls);
    if (structure->isDictionary())
        specificValue = 0;
    result.m_structureSet.add(structure);
    result.m_specificValue = JSValue(specificValue);
    result.m_state = Simple;
    return result;
}
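For orientation, here is a minimal sketch (not WebKit source) of how a client might branch on the status this overload returns; tryFoldGetById is an illustrative name, and isSimple()/takesSlowPath()/makesCalls() are the accessors this era's GetByIdStatus exposes:
static bool tryFoldGetById(VM& vm, Structure* structure, Identifier& ident)
{
    GetByIdStatus status = GetByIdStatus::computeFor(vm, structure, ident);
    // MakesCalls means a getter could run arbitrary JS; TakesSlowPath means the
    // lookup was not cacheable. Either way the generic path has to stay.
    if (!status.isSimple())
        return false;
    // Simple: the property sits at a fixed offset on a known structure, so the
    // access can become a structure check plus a direct load.
    return true;
}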
Example #2
GetByIdStatus GetByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex, UniquedStringImpl* uid)
{
    VM& vm = *profiledBlock->vm();
    
    Instruction* instruction = profiledBlock->instructions().begin() + bytecodeIndex;
    
    if (instruction[0].u.opcode == LLInt::getOpcode(op_get_array_length))
        return GetByIdStatus(NoInformation, false);

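    // The LLInt's inline cache lives in the instruction stream itself: operand 4
    // holds the StructureID last seen at this access, or zero if never primed.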
    StructureID structureID = instruction[4].u.structureID;
    if (!structureID)
        return GetByIdStatus(NoInformation, false);

    Structure* structure = vm.heap.structureIDTable().get(structureID);

    if (structure->takesSlowPathInDFGForImpureProperty())
        return GetByIdStatus(NoInformation, false);

    unsigned attributesIgnored;
    PropertyOffset offset = structure->getConcurrently(uid, attributesIgnored);
    if (!isValidOffset(offset))
        return GetByIdStatus(NoInformation, false);
    
    return GetByIdStatus(Simple, false, GetByIdVariant(StructureSet(structure), offset));
}
Example #3
GetByIdStatus GetByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex, Identifier& ident)
{
    UNUSED_PARAM(profiledBlock);
    UNUSED_PARAM(bytecodeIndex);
    UNUSED_PARAM(ident);
#if ENABLE(LLINT)
    Instruction* instruction = profiledBlock->instructions().begin() + bytecodeIndex;
    
    if (instruction[0].u.opcode == LLInt::getOpcode(llint_op_get_array_length))
        return GetByIdStatus(NoInformation, false);

    Structure* structure = instruction[4].u.structure.get();
    if (!structure)
        return GetByIdStatus(NoInformation, false);
    
    unsigned attributesIgnored;
    JSCell* specificValue;
    PropertyOffset offset = structure->get(
        *profiledBlock->vm(), ident, attributesIgnored, specificValue);
    if (structure->isDictionary())
        specificValue = 0;
    if (!isValidOffset(offset))
        return GetByIdStatus(NoInformation, false);
    
    return GetByIdStatus(Simple, false, StructureSet(structure), offset, specificValue);
#else
    return GetByIdStatus(NoInformation, false);
#endif
}
Example #4
GetByIdStatus GetByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex, Identifier& ident)
{
    UNUSED_PARAM(profiledBlock);
    UNUSED_PARAM(bytecodeIndex);
    UNUSED_PARAM(ident);
#if ENABLE(LLINT)
    Instruction* instruction = profiledBlock->instructions().begin() + bytecodeIndex;
    
    if (instruction[0].u.opcode == LLInt::getOpcode(llint_op_method_check))
        instruction++;

    Structure* structure = instruction[4].u.structure.get();
    if (!structure)
        return GetByIdStatus(NoInformation, false);
    
    unsigned attributesIgnored;
    JSCell* specificValue;
    size_t offset = structure->get(
        *profiledBlock->globalData(), ident, attributesIgnored, specificValue);
    if (offset == notFound)
        return GetByIdStatus(NoInformation, false);
    
    return GetByIdStatus(Simple, false, StructureSet(structure), offset, specificValue);
#else
    return GetByIdStatus(NoInformation, false);
#endif
}
Example #5
GetByIdStatus GetByIdStatus::computeFor(
    CodeBlock* profiledBlock, CodeBlock* dfgBlock, StubInfoMap& baselineMap,
    StubInfoMap& dfgMap, CodeOrigin codeOrigin, StringImpl* uid)
{
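    // When the FTL is compiling (dfgBlock is then the DFG code block), consult the
    // stub info that DFG code gathered first; fall back to the baseline JIT's
    // profiling (the computeFor call at the bottom) only if it is inconclusive.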
#if ENABLE(DFG_JIT)
    if (dfgBlock) {
        GetByIdStatus result;
        {
            ConcurrentJITLocker locker(dfgBlock->m_lock);
            result = computeForStubInfo(locker, dfgBlock, dfgMap.get(codeOrigin), uid);
        }
        
        if (result.takesSlowPath())
            return result;
    
        {
            ConcurrentJITLocker locker(profiledBlock->m_lock);
            if (hasExitSite(locker, profiledBlock, codeOrigin.bytecodeIndex, ExitFromFTL))
                return GetByIdStatus(TakesSlowPath, true);
        }
        
        if (result.isSet())
            return result;
    }
#else
    UNUSED_PARAM(dfgBlock);
    UNUSED_PARAM(dfgMap);
#endif

    return computeFor(profiledBlock, baselineMap, codeOrigin.bytecodeIndex, uid);
}
Example #6
GetByIdStatus GetByIdStatus::computeFor(const StructureSet& set, StringImpl* uid)
{
    // For now we only handle the super simple self access case. We could handle the
    // prototype case in the future.
    
    if (set.isEmpty())
        return GetByIdStatus();

    if (toUInt32FromStringImpl(uid) != PropertyName::NotAnIndex)
        return GetByIdStatus(TakesSlowPath);
    
    GetByIdStatus result;
    result.m_state = Simple;
    result.m_wasSeenInJIT = false;
    for (unsigned i = 0; i < set.size(); ++i) {
        Structure* structure = set[i];
        if (structure->typeInfo().overridesGetOwnPropertySlot() && structure->typeInfo().type() != GlobalObjectType)
            return GetByIdStatus(TakesSlowPath);
        
        if (!structure->propertyAccessesAreCacheable())
            return GetByIdStatus(TakesSlowPath);
        
        unsigned attributes;
        PropertyOffset offset = structure->getConcurrently(uid, attributes);
        if (!isValidOffset(offset))
            return GetByIdStatus(TakesSlowPath); // It's probably a prototype lookup. Give up on life for now, even though we could totally be way smarter about it.
        if (attributes & Accessor)
            return GetByIdStatus(MakesCalls); // We could be smarter here, like strength-reducing this to a Call.
        
        if (!result.appendVariant(GetByIdVariant(structure, offset)))
            return GetByIdStatus(TakesSlowPath);
    }
    
    return result;
}
Example #7
GetByIdStatus GetByIdStatus::computeFor(VM& vm, Structure* structure, StringImpl* uid)
{
    // For now we only handle the super simple self access case. We could handle the
    // prototype case in the future.
    
    if (!structure)
        return GetByIdStatus(TakesSlowPath);

    if (toUInt32FromStringImpl(uid) != PropertyName::NotAnIndex)
        return GetByIdStatus(TakesSlowPath);
    
    if (structure->typeInfo().overridesGetOwnPropertySlot() && structure->typeInfo().type() != GlobalObjectType)
        return GetByIdStatus(TakesSlowPath);
    
    if (!structure->propertyAccessesAreCacheable())
        return GetByIdStatus(TakesSlowPath);

    unsigned attributes;
    JSCell* specificValue;
    PropertyOffset offset = structure->getConcurrently(vm, uid, attributes, specificValue);
    if (!isValidOffset(offset))
        return GetByIdStatus(TakesSlowPath); // It's probably a prototype lookup. Give up on life for now, even though we could totally be way smarter about it.
    if (attributes & Accessor)
        return GetByIdStatus(MakesCalls);
    if (structure->isDictionary())
        specificValue = 0;
    return GetByIdStatus(
        Simple, false, GetByIdVariant(StructureSet(structure), offset, specificValue));
}
Example #8
GetByIdStatus GetByIdStatus::computeForStubInfo(const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, StructureStubInfo* stubInfo, CodeOrigin codeOrigin, UniquedStringImpl* uid)
{
    GetByIdStatus result = GetByIdStatus::computeForStubInfoWithoutExitSiteFeedback(
        locker, profiledBlock, stubInfo, uid,
        CallLinkStatus::computeExitSiteData(locker, profiledBlock, codeOrigin.bytecodeIndex));

    if (!result.takesSlowPath() && GetByIdStatus::hasExitSite(locker, profiledBlock, codeOrigin.bytecodeIndex))
        return GetByIdStatus(result.makesCalls() ? GetByIdStatus::MakesCalls : GetByIdStatus::TakesSlowPath, true);
    return result;
}
Example #9
GetByIdStatus GetByIdStatus::computeFor(
    CodeBlock* profiledBlock, CodeBlock* dfgBlock, StubInfoMap& baselineMap,
    StubInfoMap& dfgMap, CodeOrigin codeOrigin, UniquedStringImpl* uid)
{
#if ENABLE(DFG_JIT)
    if (dfgBlock) {
        CallLinkStatus::ExitSiteData exitSiteData;
        {
            ConcurrentJITLocker locker(profiledBlock->m_lock);
            exitSiteData = CallLinkStatus::computeExitSiteData(
                locker, profiledBlock, codeOrigin.bytecodeIndex);
        }
        
        GetByIdStatus result;
        {
            ConcurrentJITLocker locker(dfgBlock->m_lock);
            result = computeForStubInfoWithoutExitSiteFeedback(
                locker, dfgBlock, dfgMap.get(codeOrigin), uid, exitSiteData);
        }

        if (result.takesSlowPath())
            return result;
    
        {
            ConcurrentJITLocker locker(profiledBlock->m_lock);
            if (hasExitSite(locker, profiledBlock, codeOrigin.bytecodeIndex))
                return GetByIdStatus(TakesSlowPath, true);
        }
        
        if (result.isSet())
            return result;
    }
#else
    UNUSED_PARAM(dfgBlock);
    UNUSED_PARAM(dfgMap);
#endif

    return computeFor(profiledBlock, baselineMap, codeOrigin.bytecodeIndex, uid);
}
Example #10
GetByIdStatus GetByIdStatus::computeFor(CodeBlock* profiledBlock, StubInfoMap& map, unsigned bytecodeIndex, UniquedStringImpl* uid)
{
    ConcurrentJITLocker locker(profiledBlock->m_lock);

    GetByIdStatus result;

#if ENABLE(DFG_JIT)
    result = computeForStubInfoWithoutExitSiteFeedback(
        locker, profiledBlock, map.get(CodeOrigin(bytecodeIndex)), uid,
        CallLinkStatus::computeExitSiteData(locker, profiledBlock, bytecodeIndex));
    
    if (!result.takesSlowPath()
        && hasExitSite(locker, profiledBlock, bytecodeIndex))
        return GetByIdStatus(result.makesCalls() ? MakesCalls : TakesSlowPath, true);
#else
    UNUSED_PARAM(map);
#endif

    if (!result)
        return computeFromLLInt(profiledBlock, bytecodeIndex, uid);
    
    return result;
}
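This StubInfoMap overload is the entry point an optimizing compiler's bytecode parser would use. Below is a hedged sketch of that consumption, assuming this era's numVariants() and indexing accessors on GetByIdStatus; the emit* helpers are hypothetical stand-ins for whatever IR the client builds:
// Hypothetical emitters, standing in for the client's IR construction.
static void emitGenericGetById(UniquedStringImpl* uid);
static void emitCheckedLoad(const GetByIdVariant& variant);

static void parseGetById(CodeBlock* profiledBlock, StubInfoMap& stubInfos,
    unsigned bytecodeIndex, UniquedStringImpl* uid)
{
    GetByIdStatus status = GetByIdStatus::computeFor(
        profiledBlock, stubInfos, bytecodeIndex, uid);
    if (!status.isSimple()) {
        // NoInformation, TakesSlowPath, or MakesCalls: keep the generic get_by_id.
        emitGenericGetById(uid);
        return;
    }
    // Each variant pairs a structure set with a property offset; emit one
    // structure-checked load per variant.
    for (unsigned i = 0; i < status.numVariants(); ++i)
        emitCheckedLoad(status[i]);
}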
Example #11
GetByIdStatus GetByIdStatus::computeFor(CodeBlock* profiledBlock, StubInfoMap& map, unsigned bytecodeIndex, StringImpl* uid)
{
    ConcurrentJITLocker locker(profiledBlock->m_lock);

    GetByIdStatus result;

#if ENABLE(DFG_JIT)
    result = computeForStubInfo(
        locker, profiledBlock, map.get(CodeOrigin(bytecodeIndex)), uid);
    
    if (!result.takesSlowPath()
        && (hasExitSite(locker, profiledBlock, bytecodeIndex)
            || profiledBlock->likelyToTakeSlowCase(bytecodeIndex)))
        return GetByIdStatus(TakesSlowPath, true);
#else
    UNUSED_PARAM(map);
#endif

    if (!result)
        return computeFromLLInt(profiledBlock, bytecodeIndex, uid);
    
    return result;
}
Example #12
GetByIdStatus GetByIdStatus::computeForStubInfo(
    const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, StructureStubInfo* stubInfo, StringImpl* uid,
    CallLinkStatus::ExitSiteData callExitSiteData)
{
    if (!stubInfo || !stubInfo->seen)
        return GetByIdStatus(NoInformation);
    
    PolymorphicGetByIdList* list = 0;
    State slowPathState = TakesSlowPath;
    if (stubInfo->accessType == access_get_by_id_list) {
        list = stubInfo->u.getByIdList.list;
        for (unsigned i = 0; i < list->size(); ++i) {
            const GetByIdAccess& access = list->at(i);
            if (access.doesCalls())
                slowPathState = MakesCalls;
        }
    }
    
    // Finally figure out if we can derive an access strategy.
    GetByIdStatus result;
    result.m_state = Simple;
    result.m_wasSeenInJIT = true; // This is interesting for bytecode dumping only.
    switch (stubInfo->accessType) {
    case access_unset:
        return GetByIdStatus(NoInformation);
        
    case access_get_by_id_self: {
        Structure* structure = stubInfo->u.getByIdSelf.baseObjectStructure.get();
        if (structure->takesSlowPathInDFGForImpureProperty())
            return GetByIdStatus(slowPathState, true);
        unsigned attributesIgnored;
        GetByIdVariant variant;
        variant.m_offset = structure->getConcurrently(uid, attributesIgnored);
        if (!isValidOffset(variant.m_offset))
            return GetByIdStatus(slowPathState, true);
        
        variant.m_structureSet.add(structure);
        bool didAppend = result.appendVariant(variant);
        ASSERT_UNUSED(didAppend, didAppend);
        return result;
    }
        
    case access_get_by_id_list: {
        for (unsigned listIndex = 0; listIndex < list->size(); ++listIndex) {
            Structure* structure = list->at(listIndex).structure();
            
            ComplexGetStatus complexGetStatus = ComplexGetStatus::computeFor(
                profiledBlock, structure, list->at(listIndex).chain(),
                list->at(listIndex).chainCount(), uid);
             
            switch (complexGetStatus.kind()) {
            case ComplexGetStatus::ShouldSkip:
                continue;
                 
            case ComplexGetStatus::TakesSlowPath:
                return GetByIdStatus(slowPathState, true);
                 
            case ComplexGetStatus::Inlineable: {
                std::unique_ptr<CallLinkStatus> callLinkStatus;
                switch (list->at(listIndex).type()) {
                case GetByIdAccess::SimpleInline:
                case GetByIdAccess::SimpleStub: {
                    break;
                }
                case GetByIdAccess::Getter: {
                    AccessorCallJITStubRoutine* stub = static_cast<AccessorCallJITStubRoutine*>(
                        list->at(listIndex).stubRoutine());
                    callLinkStatus = std::make_unique<CallLinkStatus>(
                        CallLinkStatus::computeFor(
                            locker, profiledBlock, *stub->m_callLinkInfo, callExitSiteData));
                    break;
                }
                case GetByIdAccess::CustomGetter:
                case GetByIdAccess::WatchedStub: {
                    // FIXME: It would be totally sweet to support this at some point in the future.
                    // https://bugs.webkit.org/show_bug.cgi?id=133052
                    return GetByIdStatus(slowPathState, true);
                }
                default:
                    RELEASE_ASSERT_NOT_REACHED();
                }
                 
                GetByIdVariant variant(
                    StructureSet(structure), complexGetStatus.offset(), complexGetStatus.chain(),
                    std::move(callLinkStatus));
                 
                if (!result.appendVariant(variant))
                    return GetByIdStatus(slowPathState, true);
                break;
            } }
        }
        
        return result;
    }
        
    default:
        return GetByIdStatus(slowPathState, true);
    }
    
    RELEASE_ASSERT_NOT_REACHED();
    return GetByIdStatus();
}
Example #13
GetByIdStatus GetByIdStatus::computeForStubInfoWithoutExitSiteFeedback(
    const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, StructureStubInfo* stubInfo, UniquedStringImpl* uid,
    CallLinkStatus::ExitSiteData callExitSiteData)
{
    if (!stubInfo || !stubInfo->everConsidered)
        return GetByIdStatus(NoInformation);

    PolymorphicAccess* list = 0;
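    // Track up front whether any cached access performs calls: even if we end up
    // punting, MakesCalls (rather than TakesSlowPath) tells clients that the
    // access may run arbitrary JS.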
    State slowPathState = TakesSlowPath;
    if (stubInfo->cacheType == CacheType::Stub) {
        list = stubInfo->u.stub;
        for (unsigned i = 0; i < list->size(); ++i) {
            const AccessCase& access = list->at(i);
            if (access.doesCalls())
                slowPathState = MakesCalls;
        }
    }
    
    if (stubInfo->tookSlowPath)
        return GetByIdStatus(slowPathState);
    
    // Finally figure out if we can derive an access strategy.
    GetByIdStatus result;
    result.m_state = Simple;
    result.m_wasSeenInJIT = true; // This is interesting for bytecode dumping only.
    switch (stubInfo->cacheType) {
    case CacheType::Unset:
        return GetByIdStatus(NoInformation);
        
    case CacheType::GetByIdSelf: {
        Structure* structure = stubInfo->u.byIdSelf.baseObjectStructure.get();
        if (structure->takesSlowPathInDFGForImpureProperty())
            return GetByIdStatus(slowPathState, true);
        unsigned attributesIgnored;
        GetByIdVariant variant;
        variant.m_offset = structure->getConcurrently(uid, attributesIgnored);
        if (!isValidOffset(variant.m_offset))
            return GetByIdStatus(slowPathState, true);
        
        variant.m_structureSet.add(structure);
        bool didAppend = result.appendVariant(variant);
        ASSERT_UNUSED(didAppend, didAppend);
        return result;
    }
        
    case CacheType::Stub: {
        for (unsigned listIndex = 0; listIndex < list->size(); ++listIndex) {
            const AccessCase& access = list->at(listIndex);
            if (access.viaProxy())
                return GetByIdStatus(slowPathState, true);
            
            Structure* structure = access.structure();
            if (!structure) {
                // The null structure cases arise due to array.length and string.length. We have no way
                // of creating a GetByIdVariant for those, and we don't really have to since the DFG
                // handles those cases in FixupPhase using value profiling. That's a bit awkward - we
                // shouldn't have to use value profiling to discover something that the AccessCase
                // could have told us. But, it works well enough. So, our only concern here is to not
                // crash on null structure.
                return GetByIdStatus(slowPathState, true);
            }
            
            ComplexGetStatus complexGetStatus = ComplexGetStatus::computeFor(
                structure, access.conditionSet(), uid);
             
            switch (complexGetStatus.kind()) {
            case ComplexGetStatus::ShouldSkip:
                continue;
                 
            case ComplexGetStatus::TakesSlowPath:
                return GetByIdStatus(slowPathState, true);
                 
            case ComplexGetStatus::Inlineable: {
                std::unique_ptr<CallLinkStatus> callLinkStatus;
                JSFunction* intrinsicFunction = nullptr;

                switch (access.type()) {
                case AccessCase::Load: {
                    break;
                }
                case AccessCase::IntrinsicGetter: {
                    intrinsicFunction = access.intrinsicFunction();
                    break;
                }
                case AccessCase::Getter: {
                    CallLinkInfo* callLinkInfo = access.callLinkInfo();
                    ASSERT(callLinkInfo);
                    callLinkStatus = std::make_unique<CallLinkStatus>(
                        CallLinkStatus::computeFor(
                            locker, profiledBlock, *callLinkInfo, callExitSiteData));
                    break;
                }
                default: {
                    // FIXME: It would be totally sweet to support more of these at some point in the
                    // future. https://bugs.webkit.org/show_bug.cgi?id=133052
                    return GetByIdStatus(slowPathState, true);
                } }
                 
                GetByIdVariant variant(
                    StructureSet(structure), complexGetStatus.offset(),
                    complexGetStatus.conditionSet(), WTFMove(callLinkStatus),
                    intrinsicFunction);

                if (!result.appendVariant(variant))
                    return GetByIdStatus(slowPathState, true);
                break;
            } }
        }
        
        return result;
    }
        
    default:
        return GetByIdStatus(slowPathState, true);
    }
    
    RELEASE_ASSERT_NOT_REACHED();
    return GetByIdStatus();
}
Example #14
GetByIdStatus GetByIdStatus::computeFor(CodeBlock* profiledBlock, unsigned bytecodeIndex, Identifier& ident)
{
    UNUSED_PARAM(profiledBlock);
    UNUSED_PARAM(bytecodeIndex);
    UNUSED_PARAM(ident);
#if ENABLE(JIT) && ENABLE(VALUE_PROFILER)
    if (!profiledBlock->numberOfStructureStubInfos())
        return computeFromLLInt(profiledBlock, bytecodeIndex, ident);
    
    // First check if it makes either calls, in which case we want to be super careful, or
    // if it's not set at all, in which case we punt.
    StructureStubInfo& stubInfo = profiledBlock->getStubInfo(bytecodeIndex);
    if (!stubInfo.seen)
        return computeFromLLInt(profiledBlock, bytecodeIndex, ident);
    
    if (stubInfo.resetByGC)
        return GetByIdStatus(TakesSlowPath, true);

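    // First pass: pull out whichever polymorphic list this stub uses, and scan it
    // for indirect accesses, which have to be reported as MakesCalls.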
    PolymorphicAccessStructureList* list;
    int listSize;
    switch (stubInfo.accessType) {
    case access_get_by_id_self_list:
        list = stubInfo.u.getByIdSelfList.structureList;
        listSize = stubInfo.u.getByIdSelfList.listSize;
        break;
    case access_get_by_id_proto_list:
        list = stubInfo.u.getByIdProtoList.structureList;
        listSize = stubInfo.u.getByIdProtoList.listSize;
        break;
    default:
        list = 0;
        listSize = 0;
        break;
    }
    for (int i = 0; i < listSize; ++i) {
        if (!list->list[i].isDirect)
            return GetByIdStatus(MakesCalls, true);
    }
    
    // Next check if it takes slow case, in which case we want to be kind of careful.
    if (profiledBlock->likelyToTakeSlowCase(bytecodeIndex))
        return GetByIdStatus(TakesSlowPath, true);
    
    // Finally figure out if we can derive an access strategy.
    GetByIdStatus result;
    result.m_wasSeenInJIT = true; // This is interesting for bytecode dumping only.
    switch (stubInfo.accessType) {
    case access_unset:
        return computeFromLLInt(profiledBlock, bytecodeIndex, ident);
        
    case access_get_by_id_self: {
        Structure* structure = stubInfo.u.getByIdSelf.baseObjectStructure.get();
        unsigned attributesIgnored;
        JSCell* specificValue;
        result.m_offset = structure->get(
            *profiledBlock->vm(), ident, attributesIgnored, specificValue);
        if (structure->isDictionary())
            specificValue = 0;
        
        if (isValidOffset(result.m_offset)) {
            result.m_structureSet.add(structure);
            result.m_specificValue = JSValue(specificValue);
        }
        
        if (isValidOffset(result.m_offset))
            ASSERT(result.m_structureSet.size());
        break;
    }
        
    case access_get_by_id_self_list: {
        for (int i = 0; i < listSize; ++i) {
            ASSERT(list->list[i].isDirect);
            
            Structure* structure = list->list[i].base.get();
            if (result.m_structureSet.contains(structure))
                continue;
            
            unsigned attributesIgnored;
            JSCell* specificValue;
            PropertyOffset myOffset = structure->get(
                *profiledBlock->vm(), ident, attributesIgnored, specificValue);
            if (structure->isDictionary())
                specificValue = 0;
            
            if (!isValidOffset(myOffset)) {
                result.m_offset = invalidOffset;
                break;
            }
                    
            if (!i) {
                result.m_offset = myOffset;
                result.m_specificValue = JSValue(specificValue);
            } else if (result.m_offset != myOffset) {
                result.m_offset = invalidOffset;
                break;
            } else if (result.m_specificValue != JSValue(specificValue))
                result.m_specificValue = JSValue();
            
            result.m_structureSet.add(structure);
        }
                    
        if (isValidOffset(result.m_offset))
            ASSERT(result.m_structureSet.size());
        break;
    }
        
    case access_get_by_id_proto: {
        if (!stubInfo.u.getByIdProto.isDirect)
            return GetByIdStatus(MakesCalls, true);
        result.m_chain.append(stubInfo.u.getByIdProto.prototypeStructure.get());
        computeForChain(
            result, profiledBlock, ident,
            stubInfo.u.getByIdProto.baseObjectStructure.get());
        break;
    }
        
    case access_get_by_id_chain: {
        if (!stubInfo.u.getByIdChain.isDirect)
            return GetByIdStatus(MakesCalls, true);
        for (unsigned i = 0; i < stubInfo.u.getByIdChain.count; ++i)
            result.m_chain.append(stubInfo.u.getByIdChain.chain->head()[i].get());
        computeForChain(
            result, profiledBlock, ident,
            stubInfo.u.getByIdChain.baseObjectStructure.get());
        break;
    }
        
    default:
        ASSERT(!isValidOffset(result.m_offset));
        break;
    }
    
    if (!isValidOffset(result.m_offset)) {
        result.m_state = TakesSlowPath;
        result.m_structureSet.clear();
        result.m_chain.clear();
        result.m_specificValue = JSValue();
    } else
        result.m_state = Simple;
    
    return result;
#else // ENABLE(JIT) && ENABLE(VALUE_PROFILER)
    return GetByIdStatus(NoInformation, false);
#endif // ENABLE(JIT) && ENABLE(VALUE_PROFILER)
}
Example #15
GetByIdStatus GetByIdStatus::computeFor(CodeBlock* profiledBlock, StubInfoMap& map, unsigned bytecodeIndex, StringImpl* uid)
{
    ConcurrentJITLocker locker(profiledBlock->m_lock);
    
    UNUSED_PARAM(bytecodeIndex);
    UNUSED_PARAM(uid);
#if ENABLE(JIT)
    StructureStubInfo* stubInfo = map.get(CodeOrigin(bytecodeIndex));
    if (!stubInfo || !stubInfo->seen)
        return computeFromLLInt(profiledBlock, bytecodeIndex, uid);
    
    if (stubInfo->resetByGC)
        return GetByIdStatus(TakesSlowPath, true);

    PolymorphicAccessStructureList* list;
    int listSize;
    switch (stubInfo->accessType) {
    case access_get_by_id_self_list:
        list = stubInfo->u.getByIdSelfList.structureList;
        listSize = stubInfo->u.getByIdSelfList.listSize;
        break;
    case access_get_by_id_proto_list:
        list = stubInfo->u.getByIdProtoList.structureList;
        listSize = stubInfo->u.getByIdProtoList.listSize;
        break;
    default:
        list = 0;
        listSize = 0;
        break;
    }
    for (int i = 0; i < listSize; ++i) {
        if (!list->list[i].isDirect)
            return GetByIdStatus(MakesCalls, true);
    }
    
    // Next check if it takes slow case, in which case we want to be kind of careful.
    if (profiledBlock->likelyToTakeSlowCase(bytecodeIndex))
        return GetByIdStatus(TakesSlowPath, true);
    
    // Finally figure out if we can derive an access strategy.
    GetByIdStatus result;
    result.m_wasSeenInJIT = true; // This is interesting for bytecode dumping only.
    switch (stubInfo->accessType) {
    case access_unset:
        return computeFromLLInt(profiledBlock, bytecodeIndex, uid);
        
    case access_get_by_id_self: {
        Structure* structure = stubInfo->u.getByIdSelf.baseObjectStructure.get();
        if (structure->takesSlowPathInDFGForImpureProperty())
            return GetByIdStatus(TakesSlowPath, true);
        unsigned attributesIgnored;
        JSCell* specificValue;
        result.m_offset = structure->getConcurrently(
            *profiledBlock->vm(), uid, attributesIgnored, specificValue);
        if (structure->isDictionary())
            specificValue = 0;
        
        if (isValidOffset(result.m_offset)) {
            result.m_structureSet.add(structure);
            result.m_specificValue = JSValue(specificValue);
        }
        
        if (isValidOffset(result.m_offset))
            ASSERT(result.m_structureSet.size());
        break;
    }
        
    case access_get_by_id_self_list: {
        for (int i = 0; i < listSize; ++i) {
            ASSERT(list->list[i].isDirect);
            
            Structure* structure = list->list[i].base.get();
            if (structure->takesSlowPathInDFGForImpureProperty())
                return GetByIdStatus(TakesSlowPath, true);

            if (result.m_structureSet.contains(structure))
                continue;
            
            unsigned attributesIgnored;
            JSCell* specificValue;
            PropertyOffset myOffset = structure->getConcurrently(
                *profiledBlock->vm(), uid, attributesIgnored, specificValue);
            if (structure->isDictionary())
                specificValue = 0;
            
            if (!isValidOffset(myOffset)) {
                result.m_offset = invalidOffset;
                break;
            }
                    
            if (!i) {
                result.m_offset = myOffset;
                result.m_specificValue = JSValue(specificValue);
            } else if (result.m_offset != myOffset) {
                result.m_offset = invalidOffset;
                break;
            } else if (result.m_specificValue != JSValue(specificValue))
                result.m_specificValue = JSValue();
            
            result.m_structureSet.add(structure);
        }
                    
        if (isValidOffset(result.m_offset))
            ASSERT(result.m_structureSet.size());
        break;
    }
        
    case access_get_by_id_proto: {
        if (!stubInfo->u.getByIdProto.isDirect)
            return GetByIdStatus(MakesCalls, true);
        result.m_chain = adoptRef(new IntendedStructureChain(
            profiledBlock,
            stubInfo->u.getByIdProto.baseObjectStructure.get(),
            stubInfo->u.getByIdProto.prototypeStructure.get()));
        computeForChain(result, profiledBlock, uid);
        break;
    }
        
    case access_get_by_id_chain: {
        if (!stubInfo->u.getByIdChain.isDirect)
            return GetByIdStatus(MakesCalls, true);
        result.m_chain = adoptRef(new IntendedStructureChain(
            profiledBlock,
            stubInfo->u.getByIdChain.baseObjectStructure.get(),
            stubInfo->u.getByIdChain.chain.get(),
            stubInfo->u.getByIdChain.count));
        computeForChain(result, profiledBlock, uid);
        break;
    }
        
    default:
        ASSERT(!isValidOffset(result.m_offset));
        break;
    }
    
    if (!isValidOffset(result.m_offset)) {
        result.m_state = TakesSlowPath;
        result.m_structureSet.clear();
        result.m_chain.clear();
        result.m_specificValue = JSValue();
    } else
        result.m_state = Simple;
    
    return result;
#else // ENABLE(JIT)
    UNUSED_PARAM(map);
    return GetByIdStatus(NoInformation, false);
#endif // ENABLE(JIT)
}
Example #16
GetByIdStatus GetByIdStatus::computeForStubInfo(
    const ConcurrentJITLocker&, CodeBlock* profiledBlock, StructureStubInfo* stubInfo,
    StringImpl* uid)
{
    if (!stubInfo || !stubInfo->seen)
        return GetByIdStatus(NoInformation);
    
    if (stubInfo->resetByGC)
        return GetByIdStatus(TakesSlowPath, true);

    PolymorphicGetByIdList* list = 0;
    if (stubInfo->accessType == access_get_by_id_list) {
        list = stubInfo->u.getByIdList.list;
        for (unsigned i = 0; i < list->size(); ++i) {
            if (list->at(i).doesCalls())
                return GetByIdStatus(MakesCalls, true);
        }
    }
    
    // Finally figure out if we can derive an access strategy.
    GetByIdStatus result;
    result.m_state = Simple;
    result.m_wasSeenInJIT = true; // This is interesting for bytecode dumping only.
    switch (stubInfo->accessType) {
    case access_unset:
        return GetByIdStatus(NoInformation);
        
    case access_get_by_id_self: {
        Structure* structure = stubInfo->u.getByIdSelf.baseObjectStructure.get();
        if (structure->takesSlowPathInDFGForImpureProperty())
            return GetByIdStatus(TakesSlowPath, true);
        unsigned attributesIgnored;
        JSCell* specificValue;
        GetByIdVariant variant;
        variant.m_offset = structure->getConcurrently(
            *profiledBlock->vm(), uid, attributesIgnored, specificValue);
        if (!isValidOffset(variant.m_offset))
            return GetByIdStatus(TakesSlowPath, true);
        
        if (structure->isDictionary())
            specificValue = 0;
        
        variant.m_structureSet.add(structure);
        variant.m_specificValue = JSValue(specificValue);
        result.appendVariant(variant);
        return result;
    }
        
    case access_get_by_id_list: {
        for (unsigned listIndex = 0; listIndex < list->size(); ++listIndex) {
            ASSERT(!list->at(listIndex).doesCalls());
            
            Structure* structure = list->at(listIndex).structure();
            if (structure->takesSlowPathInDFGForImpureProperty())
                return GetByIdStatus(TakesSlowPath, true);
            
            if (list->at(listIndex).chain()) {
                RefPtr<IntendedStructureChain> chain = adoptRef(new IntendedStructureChain(
                    profiledBlock, structure, list->at(listIndex).chain(),
                    list->at(listIndex).chainCount()));
                if (!result.computeForChain(profiledBlock, uid, chain))
                    return GetByIdStatus(TakesSlowPath, true);
                continue;
            }
            
            unsigned attributesIgnored;
            JSCell* specificValue;
            PropertyOffset myOffset = structure->getConcurrently(
                *profiledBlock->vm(), uid, attributesIgnored, specificValue);
            if (structure->isDictionary())
                specificValue = 0;
            
            if (!isValidOffset(myOffset))
                return GetByIdStatus(TakesSlowPath, true);

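            // Try to fold this structure into an existing variant: structures may
            // share a variant only if they agree on the offset; on a specific-value
            // mismatch the value degrades to JSValue().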
            bool found = false;
            for (unsigned variantIndex = 0; variantIndex < result.m_variants.size(); ++variantIndex) {
                GetByIdVariant& variant = result.m_variants[variantIndex];
                if (variant.m_chain)
                    continue;
                
                if (variant.m_offset != myOffset)
                    continue;

                found = true;
                if (variant.m_structureSet.contains(structure))
                    break;
                
                if (variant.m_specificValue != JSValue(specificValue))
                    variant.m_specificValue = JSValue();
                
                variant.m_structureSet.add(structure);
                break;
            }
            
            if (found)
                continue;
            
            if (!result.appendVariant(GetByIdVariant(StructureSet(structure), myOffset, specificValue)))
                return GetByIdStatus(TakesSlowPath, true);
        }
        
        return result;
    }
        
    case access_get_by_id_chain: {
        if (!stubInfo->u.getByIdChain.isDirect)
            return GetByIdStatus(MakesCalls, true);
        RefPtr<IntendedStructureChain> chain = adoptRef(new IntendedStructureChain(
            profiledBlock,
            stubInfo->u.getByIdChain.baseObjectStructure.get(),
            stubInfo->u.getByIdChain.chain.get(),
            stubInfo->u.getByIdChain.count));
        if (result.computeForChain(profiledBlock, uid, chain))
            return result;
        return GetByIdStatus(TakesSlowPath, true);
    }
        
    default:
        return GetByIdStatus(TakesSlowPath, true);
    }
    
    RELEASE_ASSERT_NOT_REACHED();
    return GetByIdStatus();
}