Example #1
GetByIdStatus GetByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex, Identifier& ident)
{
    UNUSED_PARAM(profiledBlock);
    UNUSED_PARAM(bytecodeIndex);
    UNUSED_PARAM(ident);
#if ENABLE(LLINT)
    Instruction* instruction = profiledBlock->instructions().begin() + bytecodeIndex;
    
    // The LLInt rewrites op_get_by_id into a specialized array-length opcode
    // when it has only ever seen array length accesses; that case carries no
    // structure for us to learn from.
    if (instruction[0].u.opcode == LLInt::getOpcode(llint_op_get_array_length))
        return GetByIdStatus(NoInformation, false);

    // Operand slot 4 is where the LLInt caches the structure it last saw.
    Structure* structure = instruction[4].u.structure.get();
    if (!structure)
        return GetByIdStatus(NoInformation, false);
    
    unsigned attributesIgnored;
    JSCell* specificValue;
    PropertyOffset offset = structure->get(
        *profiledBlock->vm(), ident, attributesIgnored, specificValue);
    if (structure->isDictionary())
        specificValue = 0;
    if (!isValidOffset(offset))
        return GetByIdStatus(NoInformation, false);
    
    return GetByIdStatus(Simple, false, StructureSet(structure), offset, specificValue);
#else
    return GetByIdStatus(NoInformation, false);
#endif
}
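The trick above is that the interpreter doubles as a profiler: whatever structure the LLInt last saw is parked in a fixed operand slot of the instruction stream, where a later compilation pass can read it back. A minimal standalone sketch of that idea, with illustrative stand-in types rather than JSC's real Instruction and Structure:

// Illustrative model of an LLInt-style inline cache slot; the types and
// the slot index mirror the code above but are stand-ins, not JSC's API.
struct Structure;

struct Instruction {
    union {
        const void* opcode;
        Structure* structure;
    } u;
};

// The interpreter records the structure it last saw in operand slot 4.
void recordStructure(Instruction* pc, Structure* s)
{
    pc[4].u.structure = s;
}

// A later profiling pass reads the slot back; null means the instruction
// never executed against a cacheable base.
Structure* probeStructure(Instruction* pc)
{
    return pc[4].u.structure;
}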
Example #2
void GetByIdStatus::computeForChain(GetByIdStatus& result, CodeBlock* profiledBlock, Identifier& ident, Structure* structure)
{
#if ENABLE(JIT) && ENABLE(VALUE_PROFILER)
    // Validate the chain. If the chain is invalid, then currently the best thing
    // we can do is to assume that TakesSlow is true. In the future, it might be
    // worth exploring reifying the structure chain from the structure we've got
    // instead of using the one from the cache, since that will do the right things
    // if the structure chain has changed. But that may be harder, because we may
    // then end up having a different type of access altogether. And it currently
    // does not appear to be worth it to do so -- effectively, the heuristic we
    // have now is that if the structure chain has changed between when it was
    // cached in the baseline JIT and when the DFG tried to inline the access,
    // then we fall back on a polymorphic access.
    Structure* currentStructure = structure;
    JSObject* currentObject = 0;
    for (unsigned i = 0; i < result.m_chain.size(); ++i) {
        ASSERT(!currentStructure->isDictionary());
        currentObject = asObject(currentStructure->prototypeForLookup(profiledBlock));
        currentStructure = result.m_chain[i];
        if (currentObject->structure() != currentStructure)
            return;
    }
        
    ASSERT(currentObject);
        
    unsigned attributesIgnored;
    JSCell* specificValue;
        
    result.m_offset = currentStructure->get(
        *profiledBlock->vm(), ident, attributesIgnored, specificValue);
    if (currentStructure->isDictionary())
        specificValue = 0;
    if (!isValidOffset(result.m_offset))
        return;
        
    result.m_structureSet.add(structure);
    result.m_specificValue = JSValue(specificValue);
#else
    UNUSED_PARAM(result);
    UNUSED_PARAM(profiledBlock);
    UNUSED_PARAM(ident);
    UNUSED_PARAM(structure);
    UNREACHABLE_FOR_PLATFORM();
#endif
}
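The loop above walks the live prototype objects in lockstep with the cached structure vector and gives up the moment they disagree. The same walk as a self-contained sketch, with simplified stand-in types (not JSC's API):

#include <vector>

struct Structure;

struct Object {
    Structure* structure;
};

struct Structure {
    Object* prototype;  // the prototype object reachable from this structure
};

// Returns true if every cached prototype structure still matches the live
// object graph, i.e. the chain has not gone stale since it was cached.
bool chainStillMatches(Structure* head, const std::vector<Structure*>& cachedChain)
{
    Structure* current = head;
    for (Structure* cached : cachedChain) {
        Object* proto = current->prototype;
        if (!proto || proto->structure != cached)
            return false;  // the chain changed after it was cached
        current = cached;
    }
    return true;
}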
Example #3
std::unique_ptr<PolyProtoAccessChain> PolyProtoAccessChain::create(JSGlobalObject* globalObject, JSCell* base, JSObject* target, bool& usesPolyProto)
{
    JSCell* current = base;
    VM& vm = *base->vm();

    bool found = false;

    usesPolyProto = false;

    std::unique_ptr<PolyProtoAccessChain> result(new PolyProtoAccessChain());

    for (unsigned iterationNumber = 0; true; ++iterationNumber) {
        Structure* structure = current->structure(vm);

        if (!structure->propertyAccessesAreCacheable())
            return nullptr;

        if (structure->isProxy())
            return nullptr;

        if (structure->isDictionary()) {
            ASSERT(structure->isObject());
            if (structure->hasBeenFlattenedBefore())
                return nullptr;

            structure->flattenDictionaryStructure(vm, asObject(current));
        }

        // To save memory, we don't include the base in the chain. We let
        // AccessCase provide the base to us as needed.
        if (iterationNumber)
            result->m_chain.append(structure);
        else
            RELEASE_ASSERT(current == base);

        if (current == target) {
            found = true;
            break;
        }

        // We only have poly proto if we need to access our prototype via
        // the poly proto protocol. If the slot base is the only poly proto
        // thing in the chain, and we have a cache hit on it, then we're not
        // poly proto.
        usesPolyProto |= structure->hasPolyProto();

        JSValue prototype = structure->prototypeForLookup(globalObject, current);
        if (prototype.isNull())
            break;
        current = asObject(prototype);
    }

    if (!found && !!target)
        return nullptr;

    return result;
}
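A hedged usage sketch for create(): the helper name tryCachePolyProtoAccess and its surroundings are hypothetical, but the call matches the signature above.

// Hypothetical caller, sketched to show the contract of create().
bool tryCachePolyProtoAccess(JSGlobalObject* globalObject, JSCell* base, JSObject* slotBase)
{
    bool usesPolyProto = false;
    std::unique_ptr<PolyProtoAccessChain> chain =
        PolyProtoAccessChain::create(globalObject, base, slotBase, usesPolyProto);
    if (!chain)
        return false;  // uncacheable: proxy, re-flattened dictionary, or target not on the chain
    // When usesPolyProto is set, generated code must load each prototype
    // dynamically rather than baking it into the stub as a constant.
    return true;
}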
Example #4
bool IntendedStructureChain::isNormalized()
{
    for (unsigned i = 0; i < m_vector.size(); ++i) {
        Structure* structure = m_vector[i];
        if (structure->isProxy())
            return false;
        if (structure->isDictionary())
            return false;
    }
    return true;
}
Example #5
bool GetByIdStatus::computeForChain(CodeBlock* profiledBlock, StringImpl* uid, PassRefPtr<IntendedStructureChain> passedChain)
{
#if ENABLE(JIT)
    RefPtr<IntendedStructureChain> chain = passedChain;

    // Validate the chain. If the chain is invalid, then currently the best thing
    // we can do is to assume that TakesSlow is true. In the future, it might be
    // worth exploring reifying the structure chain from the structure we've got
    // instead of using the one from the cache, since that will do the right things
    // if the structure chain has changed. But that may be harder, because we may
    // then end up having a different type of access altogether. And it currently
    // does not appear to be worth it to do so -- effectively, the heuristic we
    // have now is that if the structure chain has changed between when it was
    // cached in the baseline JIT and when the DFG tried to inline the access,
    // then we fall back on a polymorphic access.
    if (!chain->isStillValid())
        return false;

    if (chain->head()->takesSlowPathInDFGForImpureProperty())
        return false;
    size_t chainSize = chain->size();
    for (size_t i = 0; i < chainSize; i++) {
        if (chain->at(i)->takesSlowPathInDFGForImpureProperty())
            return false;
    }

    JSObject* currentObject = chain->terminalPrototype();
    Structure* currentStructure = chain->last();

    ASSERT_UNUSED(currentObject, currentObject);

    unsigned attributesIgnored;
    JSCell* specificValue;

    PropertyOffset offset = currentStructure->getConcurrently(
                                *profiledBlock->vm(), uid, attributesIgnored, specificValue);
    if (currentStructure->isDictionary())
        specificValue = 0;
    if (!isValidOffset(offset))
        return false;

    m_variants.append(
        GetByIdVariant(StructureSet(chain->head()), offset, specificValue, chain));
    return true;
#else // ENABLE(JIT)
    UNUSED_PARAM(profiledBlock);
    UNUSED_PARAM(uid);
    UNUSED_PARAM(passedChain);
    UNREACHABLE_FOR_PLATFORM();
    return false;
#endif // ENABLE(JIT)
}
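The m_variants list that this feeds is the unit the DFG later consumes: each variant binds a set of base structures to a single property location, plus the prototype chain that must still hold. A simplified model of that record (illustrative, not JSC's real GetByIdVariant):

#include <vector>

using PropertyOffset = int;
constexpr PropertyOffset invalidOffset = -1;

struct StructureModel;

// One polymorphic access variant: "for any of these base structures, the
// property lives at this offset, provided this chain is still intact".
struct VariantModel {
    std::vector<StructureModel*> structures;  // base structures covered
    PropertyOffset offset = invalidOffset;    // storage slot of the property
    std::vector<StructureModel*> chain;       // prototype structures to re-check; empty for self access
};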
Example #6
bool IntendedStructureChain::isNormalized()
{
    if (m_head->typeInfo().type() == ProxyType)
        return false;
    for (unsigned i = 0; i < m_vector.size(); ++i) {
        Structure* structure = m_vector[i];
        if (structure->typeInfo().type() == ProxyType)
            return false;
        if (structure->isDictionary())
            return false;
    }
    return true;
}
Example #7
GetByIdStatus GetByIdStatus::computeFor(CodeBlock* profiledBlock, unsigned bytecodeIndex, Identifier& ident)
{
    UNUSED_PARAM(profiledBlock);
    UNUSED_PARAM(bytecodeIndex);
    UNUSED_PARAM(ident);
#if ENABLE(JIT) && ENABLE(VALUE_PROFILER)
    if (!profiledBlock->numberOfStructureStubInfos())
        return computeFromLLInt(profiledBlock, bytecodeIndex, ident);
    
    // First check if it either makes calls, in which case we want to be super
    // careful, or is not set at all, in which case we punt.
    StructureStubInfo& stubInfo = profiledBlock->getStubInfo(bytecodeIndex);
    if (!stubInfo.seen)
        return computeFromLLInt(profiledBlock, bytecodeIndex, ident);
    
    if (stubInfo.resetByGC)
        return GetByIdStatus(TakesSlowPath, true);

    PolymorphicAccessStructureList* list;
    int listSize;
    switch (stubInfo.accessType) {
    case access_get_by_id_self_list:
        list = stubInfo.u.getByIdSelfList.structureList;
        listSize = stubInfo.u.getByIdSelfList.listSize;
        break;
    case access_get_by_id_proto_list:
        list = stubInfo.u.getByIdProtoList.structureList;
        listSize = stubInfo.u.getByIdProtoList.listSize;
        break;
    default:
        list = 0;
        listSize = 0;
        break;
    }
    for (int i = 0; i < listSize; ++i) {
        if (!list->list[i].isDirect)
            return GetByIdStatus(MakesCalls, true);
    }
    
    // Next, check if it takes the slow case, in which case we want to be kind of careful.
    if (profiledBlock->likelyToTakeSlowCase(bytecodeIndex))
        return GetByIdStatus(TakesSlowPath, true);
    
    // Finally figure out if we can derive an access strategy.
    GetByIdStatus result;
    result.m_wasSeenInJIT = true; // This is interesting for bytecode dumping only.
    switch (stubInfo.accessType) {
    case access_unset:
        return computeFromLLInt(profiledBlock, bytecodeIndex, ident);
        
    case access_get_by_id_self: {
        Structure* structure = stubInfo.u.getByIdSelf.baseObjectStructure.get();
        unsigned attributesIgnored;
        JSCell* specificValue;
        result.m_offset = structure->get(
            *profiledBlock->vm(), ident, attributesIgnored, specificValue);
        if (structure->isDictionary())
            specificValue = 0;
        
        if (isValidOffset(result.m_offset)) {
            result.m_structureSet.add(structure);
            result.m_specificValue = JSValue(specificValue);
        }
        
        if (isValidOffset(result.m_offset))
            ASSERT(result.m_structureSet.size());
        break;
    }
        
    case access_get_by_id_self_list: {
        for (int i = 0; i < listSize; ++i) {
            ASSERT(list->list[i].isDirect);
            
            Structure* structure = list->list[i].base.get();
            if (result.m_structureSet.contains(structure))
                continue;
            
            unsigned attributesIgnored;
            JSCell* specificValue;
            PropertyOffset myOffset = structure->get(
                *profiledBlock->vm(), ident, attributesIgnored, specificValue);
            if (structure->isDictionary())
                specificValue = 0;
            
            if (!isValidOffset(myOffset)) {
                result.m_offset = invalidOffset;
                break;
            }
                    
            if (!i) {
                result.m_offset = myOffset;
                result.m_specificValue = JSValue(specificValue);
            } else if (result.m_offset != myOffset) {
                result.m_offset = invalidOffset;
                break;
            } else if (result.m_specificValue != JSValue(specificValue))
                result.m_specificValue = JSValue();
            
            result.m_structureSet.add(structure);
        }
                    
        if (isValidOffset(result.m_offset))
            ASSERT(result.m_structureSet.size());
        break;
    }
        
    case access_get_by_id_proto: {
        if (!stubInfo.u.getByIdProto.isDirect)
            return GetByIdStatus(MakesCalls, true);
        result.m_chain.append(stubInfo.u.getByIdProto.prototypeStructure.get());
        computeForChain(
            result, profiledBlock, ident,
            stubInfo.u.getByIdProto.baseObjectStructure.get());
        break;
    }
        
    case access_get_by_id_chain: {
        if (!stubInfo.u.getByIdChain.isDirect)
            return GetByIdStatus(MakesCalls, true);
        for (unsigned i = 0; i < stubInfo.u.getByIdChain.count; ++i)
            result.m_chain.append(stubInfo.u.getByIdChain.chain->head()[i].get());
        computeForChain(
            result, profiledBlock, ident,
            stubInfo.u.getByIdChain.baseObjectStructure.get());
        break;
    }
        
    default:
        ASSERT(!isValidOffset(result.m_offset));
        break;
    }
    
    if (!isValidOffset(result.m_offset)) {
        result.m_state = TakesSlowPath;
        result.m_structureSet.clear();
        result.m_chain.clear();
        result.m_specificValue = JSValue();
    } else
        result.m_state = Simple;
    
    return result;
#else // ENABLE(JIT) && ENABLE(VALUE_PROFILER)
    return GetByIdStatus(NoInformation, false);
#endif // ENABLE(JIT) && ENABLE(VALUE_PROFILER)
}
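The self-list case above enforces a simple merge rule: every structure in the list must report the same offset, and the cached constant survives only if all structures agree on it too. That rule as a standalone sketch, with stand-in types:

#include <vector>

using PropertyOffset = int;
constexpr PropertyOffset invalidOffset = -1;

struct Entry {
    PropertyOffset offset;
    const void* specificValue;  // nullptr means "no known constant"
};

// Returns the common offset, or invalidOffset if any entry disagrees;
// specificValue decays to nullptr when the constants diverge.
PropertyOffset mergeOffsets(const std::vector<Entry>& entries, const void*& specificValue)
{
    PropertyOffset merged = invalidOffset;
    specificValue = nullptr;
    for (unsigned i = 0; i < entries.size(); ++i) {
        if (entries[i].offset == invalidOffset)
            return invalidOffset;
        if (!i) {
            merged = entries[i].offset;
            specificValue = entries[i].specificValue;
        } else if (entries[i].offset != merged) {
            return invalidOffset;       // offsets disagree: fall back to the slow path
        } else if (entries[i].specificValue != specificValue)
            specificValue = nullptr;    // constants disagree: keep the offset, drop the constant
    }
    return merged;
}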
Example #8
GetByIdStatus GetByIdStatus::computeFor(CodeBlock* profiledBlock, StubInfoMap& map, unsigned bytecodeIndex, StringImpl* uid)
{
    ConcurrentJITLocker locker(profiledBlock->m_lock);
    
    UNUSED_PARAM(profiledBlock);
    UNUSED_PARAM(bytecodeIndex);
    UNUSED_PARAM(uid);
#if ENABLE(JIT)
    StructureStubInfo* stubInfo = map.get(CodeOrigin(bytecodeIndex));
    if (!stubInfo || !stubInfo->seen)
        return computeFromLLInt(profiledBlock, bytecodeIndex, uid);
    
    if (stubInfo->resetByGC)
        return GetByIdStatus(TakesSlowPath, true);

    PolymorphicAccessStructureList* list;
    int listSize;
    switch (stubInfo->accessType) {
    case access_get_by_id_self_list:
        list = stubInfo->u.getByIdSelfList.structureList;
        listSize = stubInfo->u.getByIdSelfList.listSize;
        break;
    case access_get_by_id_proto_list:
        list = stubInfo->u.getByIdProtoList.structureList;
        listSize = stubInfo->u.getByIdProtoList.listSize;
        break;
    default:
        list = 0;
        listSize = 0;
        break;
    }
    for (int i = 0; i < listSize; ++i) {
        if (!list->list[i].isDirect)
            return GetByIdStatus(MakesCalls, true);
    }
    
    // Next, check if it takes the slow case, in which case we want to be kind of careful.
    if (profiledBlock->likelyToTakeSlowCase(bytecodeIndex))
        return GetByIdStatus(TakesSlowPath, true);
    
    // Finally figure out if we can derive an access strategy.
    GetByIdStatus result;
    result.m_wasSeenInJIT = true; // This is interesting for bytecode dumping only.
    switch (stubInfo->accessType) {
    case access_unset:
        return computeFromLLInt(profiledBlock, bytecodeIndex, uid);
        
    case access_get_by_id_self: {
        Structure* structure = stubInfo->u.getByIdSelf.baseObjectStructure.get();
        if (structure->takesSlowPathInDFGForImpureProperty())
            return GetByIdStatus(TakesSlowPath, true);
        unsigned attributesIgnored;
        JSCell* specificValue;
        result.m_offset = structure->getConcurrently(
            *profiledBlock->vm(), uid, attributesIgnored, specificValue);
        if (structure->isDictionary())
            specificValue = 0;
        
        if (isValidOffset(result.m_offset)) {
            result.m_structureSet.add(structure);
            result.m_specificValue = JSValue(specificValue);
        }
        
        if (isValidOffset(result.m_offset))
            ASSERT(result.m_structureSet.size());
        break;
    }
        
    case access_get_by_id_self_list: {
        for (int i = 0; i < listSize; ++i) {
            ASSERT(list->list[i].isDirect);
            
            Structure* structure = list->list[i].base.get();
            if (structure->takesSlowPathInDFGForImpureProperty())
                return GetByIdStatus(TakesSlowPath, true);

            if (result.m_structureSet.contains(structure))
                continue;
            
            unsigned attributesIgnored;
            JSCell* specificValue;
            PropertyOffset myOffset = structure->getConcurrently(
                *profiledBlock->vm(), uid, attributesIgnored, specificValue);
            if (structure->isDictionary())
                specificValue = 0;
            
            if (!isValidOffset(myOffset)) {
                result.m_offset = invalidOffset;
                break;
            }
                    
            if (!i) {
                result.m_offset = myOffset;
                result.m_specificValue = JSValue(specificValue);
            } else if (result.m_offset != myOffset) {
                result.m_offset = invalidOffset;
                break;
            } else if (result.m_specificValue != JSValue(specificValue))
                result.m_specificValue = JSValue();
            
            result.m_structureSet.add(structure);
        }
                    
        if (isValidOffset(result.m_offset))
            ASSERT(result.m_structureSet.size());
        break;
    }
        
    case access_get_by_id_proto: {
        if (!stubInfo->u.getByIdProto.isDirect)
            return GetByIdStatus(MakesCalls, true);
        result.m_chain = adoptRef(new IntendedStructureChain(
            profiledBlock,
            stubInfo->u.getByIdProto.baseObjectStructure.get(),
            stubInfo->u.getByIdProto.prototypeStructure.get()));
        computeForChain(result, profiledBlock, uid);
        break;
    }
        
    case access_get_by_id_chain: {
        if (!stubInfo->u.getByIdChain.isDirect)
            return GetByIdStatus(MakesCalls, true);
        result.m_chain = adoptRef(new IntendedStructureChain(
            profiledBlock,
            stubInfo->u.getByIdChain.baseObjectStructure.get(),
            stubInfo->u.getByIdChain.chain.get(),
            stubInfo->u.getByIdChain.count));
        computeForChain(result, profiledBlock, uid);
        break;
    }
        
    default:
        ASSERT(!isValidOffset(result.m_offset));
        break;
    }
    
    if (!isValidOffset(result.m_offset)) {
        result.m_state = TakesSlowPath;
        result.m_structureSet.clear();
        result.m_chain.clear();
        result.m_specificValue = JSValue();
    } else
        result.m_state = Simple;
    
    return result;
#else // ENABLE(JIT)
    UNUSED_PARAM(map);
    return GetByIdStatus(NoInformation, false);
#endif // ENABLE(JIT)
}
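Relative to Example #7, the lookup above is concurrency-aware: it takes the code block's lock, probes the stub-info map by bytecode origin, and falls back to LLInt data when the JIT has nothing recorded. A sketch of that discipline, with stand-in types (std::mutex standing in for ConcurrentJITLocker):

#include <mutex>
#include <unordered_map>

struct StubInfoModel {
    bool seen = false;
};

struct CodeBlockModel {
    std::mutex lock;
    std::unordered_map<unsigned, StubInfoModel> stubInfos;  // keyed by bytecode index
};

// Probe under the lock; a missing or never-hit entry means the caller
// should fall back to interpreter profiling instead.
bool hasUsableStubInfo(CodeBlockModel& block, unsigned bytecodeIndex)
{
    std::lock_guard<std::mutex> holder(block.lock);
    auto it = block.stubInfos.find(bytecodeIndex);
    return it != block.stubInfos.end() && it->second.seen;
}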
Example #9
GetByIdStatus GetByIdStatus::computeForStubInfo(
    const ConcurrentJITLocker&, CodeBlock* profiledBlock, StructureStubInfo* stubInfo,
    StringImpl* uid)
{
    if (!stubInfo || !stubInfo->seen)
        return GetByIdStatus(NoInformation);
    
    if (stubInfo->resetByGC)
        return GetByIdStatus(TakesSlowPath, true);

    PolymorphicGetByIdList* list = 0;
    if (stubInfo->accessType == access_get_by_id_list) {
        list = stubInfo->u.getByIdList.list;
        for (unsigned i = 0; i < list->size(); ++i) {
            if (list->at(i).doesCalls())
                return GetByIdStatus(MakesCalls, true);
        }
    }
    
    // Finally figure out if we can derive an access strategy.
    GetByIdStatus result;
    result.m_state = Simple;
    result.m_wasSeenInJIT = true; // This is interesting for bytecode dumping only.
    switch (stubInfo->accessType) {
    case access_unset:
        return GetByIdStatus(NoInformation);
        
    case access_get_by_id_self: {
        Structure* structure = stubInfo->u.getByIdSelf.baseObjectStructure.get();
        if (structure->takesSlowPathInDFGForImpureProperty())
            return GetByIdStatus(TakesSlowPath, true);
        unsigned attributesIgnored;
        JSCell* specificValue;
        GetByIdVariant variant;
        variant.m_offset = structure->getConcurrently(
            *profiledBlock->vm(), uid, attributesIgnored, specificValue);
        if (!isValidOffset(variant.m_offset))
            return GetByIdStatus(TakesSlowPath, true);
        
        if (structure->isDictionary())
            specificValue = 0;
        
        variant.m_structureSet.add(structure);
        variant.m_specificValue = JSValue(specificValue);
        result.appendVariant(variant);
        return result;
    }
        
    case access_get_by_id_list: {
        for (unsigned listIndex = 0; listIndex < list->size(); ++listIndex) {
            ASSERT(!list->at(listIndex).doesCalls());
            
            Structure* structure = list->at(listIndex).structure();
            if (structure->takesSlowPathInDFGForImpureProperty())
                return GetByIdStatus(TakesSlowPath, true);
            
            if (list->at(listIndex).chain()) {
                RefPtr<IntendedStructureChain> chain = adoptRef(new IntendedStructureChain(
                    profiledBlock, structure, list->at(listIndex).chain(),
                    list->at(listIndex).chainCount()));
                if (!result.computeForChain(profiledBlock, uid, chain))
                    return GetByIdStatus(TakesSlowPath, true);
                continue;
            }
            
            unsigned attributesIgnored;
            JSCell* specificValue;
            PropertyOffset myOffset = structure->getConcurrently(
                *profiledBlock->vm(), uid, attributesIgnored, specificValue);
            if (structure->isDictionary())
                specificValue = 0;
            
            if (!isValidOffset(myOffset))
                return GetByIdStatus(TakesSlowPath, true);

            bool found = false;
            for (unsigned variantIndex = 0; variantIndex < result.m_variants.size(); ++variantIndex) {
                GetByIdVariant& variant = result.m_variants[variantIndex];
                if (variant.m_chain)
                    continue;
                
                if (variant.m_offset != myOffset)
                    continue;

                found = true;
                if (variant.m_structureSet.contains(structure))
                    break;
                
                if (variant.m_specificValue != JSValue(specificValue))
                    variant.m_specificValue = JSValue();
                
                variant.m_structureSet.add(structure);
                break;
            }
            
            if (found)
                continue;
            
            if (!result.appendVariant(GetByIdVariant(StructureSet(structure), myOffset, specificValue)))
                return GetByIdStatus(TakesSlowPath, true);
        }
        
        return result;
    }
        
    case access_get_by_id_chain: {
        if (!stubInfo->u.getByIdChain.isDirect)
            return GetByIdStatus(MakesCalls, true);
        RefPtr<IntendedStructureChain> chain = adoptRef(new IntendedStructureChain(
            profiledBlock,
            stubInfo->u.getByIdChain.baseObjectStructure.get(),
            stubInfo->u.getByIdChain.chain.get(),
            stubInfo->u.getByIdChain.count));
        if (result.computeForChain(profiledBlock, uid, chain))
            return result;
        return GetByIdStatus(TakesSlowPath, true);
    }
        
    default:
        return GetByIdStatus(TakesSlowPath, true);
    }
    
    RELEASE_ASSERT_NOT_REACHED();
    return GetByIdStatus();
}
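The listIndex loop above coalesces observations: a new (structure, offset) pair either joins an existing variant with the same offset or starts a fresh one, and chained variants are never merged into. The same bookkeeping as a sketch over stand-in types:

#include <algorithm>
#include <vector>

using PropertyOffset = int;

struct StructureModel;

struct VariantModel {
    std::vector<StructureModel*> structures;
    PropertyOffset offset;
    bool hasChain;  // variants guarded by a prototype chain are never merged into
};

void addObservation(std::vector<VariantModel>& variants, StructureModel* s, PropertyOffset offset)
{
    for (VariantModel& variant : variants) {
        if (variant.hasChain || variant.offset != offset)
            continue;
        // Same offset, no chain: fold the structure into this variant.
        if (std::find(variant.structures.begin(), variant.structures.end(), s) == variant.structures.end())
            variant.structures.push_back(s);
        return;
    }
    variants.push_back(VariantModel { { s }, offset, false });  // no match: start a new variant
}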
Example #10
PutByIdStatus PutByIdStatus::computeFor(JSGlobalObject* globalObject, const StructureSet& set, StringImpl* uid, bool isDirect)
{
    if (toUInt32FromStringImpl(uid) != PropertyName::NotAnIndex)
        return PutByIdStatus(TakesSlowPath);

    if (set.isEmpty())
        return PutByIdStatus();
    
    PutByIdStatus result;
    result.m_state = Simple;
    for (unsigned i = 0; i < set.size(); ++i) {
        Structure* structure = set[i];
        
        if (structure->typeInfo().overridesGetOwnPropertySlot() && structure->typeInfo().type() != GlobalObjectType)
            return PutByIdStatus(TakesSlowPath);

        if (!structure->propertyAccessesAreCacheable())
            return PutByIdStatus(TakesSlowPath);
    
        unsigned attributes;
        PropertyOffset offset = structure->getConcurrently(uid, attributes);
        if (isValidOffset(offset)) {
            if (attributes & CustomAccessor)
                return PutByIdStatus(MakesCalls);

            if (attributes & (Accessor | ReadOnly))
                return PutByIdStatus(TakesSlowPath);
            
            WatchpointSet* replaceSet = structure->propertyReplacementWatchpointSet(offset);
            if (!replaceSet || replaceSet->isStillValid()) {
                // When this executes, it'll create, and fire, this replacement watchpoint set.
                // That means that this has probably never executed, or that something fishy is
                // going on. Also, we cannot create or fire the watchpoint set from the concurrent
                // JIT thread, so even if we wanted to do this, we'd need some kind of lazy
                // mechanism. So it is better to leave this alone and take the slow path.
                return PutByIdStatus(TakesSlowPath);
            }
            
            if (!result.appendVariant(PutByIdVariant::replace(structure, offset)))
                return PutByIdStatus(TakesSlowPath);
            continue;
        }
    
        // Our hypothesis is that we're doing a transition. Before we prove that this is really
        // true, we want to do some sanity checks.
    
        // Don't cache put transitions on dictionaries.
        if (structure->isDictionary())
            return PutByIdStatus(TakesSlowPath);

        // If the structure corresponds to something that isn't an object, then give up, since
        // we don't want to be adding properties to strings.
        if (structure->typeInfo().type() == StringType)
            return PutByIdStatus(TakesSlowPath);
    
        RefPtr<IntendedStructureChain> chain;
        if (!isDirect) {
            chain = adoptRef(new IntendedStructureChain(globalObject, structure));
        
            // If the prototype chain has setters or read-only properties, then give up.
            if (chain->mayInterceptStoreTo(uid))
                return PutByIdStatus(TakesSlowPath);
        
            // If the prototype chain hasn't been normalized (i.e. there are proxies or dictionaries)
            // then give up. The dictionary case would only happen if this structure has not been
            // used in an optimized put_by_id transition. And really the only reason why we would
            // bail here is that I don't really feel like having the optimizing JIT go and flatten
            // dictionaries if we have evidence to suggest that those objects were never used as
            // prototypes in a cacheable prototype access - i.e. there's a good chance that some of
            // the other checks below will fail.
            if (structure->isProxy() || !chain->isNormalized())
                return PutByIdStatus(TakesSlowPath);
        }
    
        // We only optimize if there is already a structure that the transition is cached to.
        Structure* transition = Structure::addPropertyTransitionToExistingStructureConcurrently(structure, uid, 0, offset);
        if (!transition)
            return PutByIdStatus(TakesSlowPath);
        ASSERT(isValidOffset(offset));
    
        bool didAppend = result.appendVariant(
            PutByIdVariant::transition(structure, transition, chain.get(), offset));
        if (!didAppend)
            return PutByIdStatus(TakesSlowPath);
    }
    
    return result;
}
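Distilled from computeFor above, the core classification is a small decision tree: a put is either a replace of an existing slot or a cached transition that adds one, and everything else punts. A sketch over simplified stand-in flags; the prototype-chain interception checks are folded out for brevity:

enum class PutKind { SlowPath, MakesCalls, Replace, Transition };

struct StructureModel {
    bool hasProperty;             // the property already exists on the structure
    bool isCustomAccessor;        // existing property is a custom accessor
    bool isAccessorOrReadOnly;    // existing property is a getter/setter or read-only
    bool replaceWatchpointFired;  // some put has already executed against this slot
    bool isDictionary;
    bool hasCachedTransition;     // a transition structure already exists for uid
};

PutKind classifyPut(const StructureModel& s)
{
    if (s.hasProperty) {
        if (s.isCustomAccessor)
            return PutKind::MakesCalls;
        if (s.isAccessorOrReadOnly)
            return PutKind::SlowPath;
        // A still-valid watchpoint set means no put has run here yet, so
        // there is nothing worth caching; stay on the slow path.
        if (!s.replaceWatchpointFired)
            return PutKind::SlowPath;
        return PutKind::Replace;
    }
    if (s.isDictionary || !s.hasCachedTransition)
        return PutKind::SlowPath;
    return PutKind::Transition;
}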
Example #11
PutByIdStatus PutByIdStatus::computeFor(JSGlobalObject* globalObject, const StructureSet& set, UniquedStringImpl* uid, bool isDirect)
{
    if (parseIndex(*uid))
        return PutByIdStatus(TakesSlowPath);

    if (set.isEmpty())
        return PutByIdStatus();
    
    PutByIdStatus result;
    result.m_state = Simple;
    for (unsigned i = 0; i < set.size(); ++i) {
        Structure* structure = set[i];
        
        if (structure->typeInfo().overridesGetOwnPropertySlot() && structure->typeInfo().type() != GlobalObjectType)
            return PutByIdStatus(TakesSlowPath);

        if (!structure->propertyAccessesAreCacheable())
            return PutByIdStatus(TakesSlowPath);
    
        unsigned attributes;
        PropertyOffset offset = structure->getConcurrently(uid, attributes);
        if (isValidOffset(offset)) {
            if (attributes & CustomAccessor)
                return PutByIdStatus(MakesCalls);

            if (attributes & (Accessor | ReadOnly))
                return PutByIdStatus(TakesSlowPath);
            
            WatchpointSet* replaceSet = structure->propertyReplacementWatchpointSet(offset);
            if (!replaceSet || replaceSet->isStillValid()) {
                // When this executes, it'll create, and fire, this replacement watchpoint set.
                // That means that this has probably never executed, or that something fishy is
                // going on. Also, we cannot create or fire the watchpoint set from the concurrent
                // JIT thread, so even if we wanted to do this, we'd need some kind of lazy
                // mechanism. So it is better to leave this alone and take the slow path.
                return PutByIdStatus(TakesSlowPath);
            }

            PutByIdVariant variant =
                PutByIdVariant::replace(structure, offset, structure->inferredTypeDescriptorFor(uid));
            if (!result.appendVariant(variant))
                return PutByIdStatus(TakesSlowPath);
            continue;
        }
    
        // Our hypothesis is that we're doing a transition. Before we prove that this is really
        // true, we want to do some sanity checks.
    
        // Don't cache put transitions on dictionaries.
        if (structure->isDictionary())
            return PutByIdStatus(TakesSlowPath);

        // If the structure corresponds to something that isn't an object, then give up, since
        // we don't want to be adding properties to strings.
        if (!structure->typeInfo().isObject())
            return PutByIdStatus(TakesSlowPath);
    
        ObjectPropertyConditionSet conditionSet;
        if (!isDirect) {
            conditionSet = generateConditionsForPropertySetterMissConcurrently(
                globalObject->vm(), globalObject, structure, uid);
            if (!conditionSet.isValid())
                return PutByIdStatus(TakesSlowPath);
        }
    
        // We only optimize if there is already a structure that the transition is cached to.
        Structure* transition =
            Structure::addPropertyTransitionToExistingStructureConcurrently(structure, uid, 0, offset);
        if (!transition)
            return PutByIdStatus(TakesSlowPath);
        ASSERT(isValidOffset(offset));
    
        bool didAppend = result.appendVariant(
            PutByIdVariant::transition(
                structure, transition, conditionSet, offset,
                transition->inferredTypeDescriptorFor(uid)));
        if (!didAppend)
            return PutByIdStatus(TakesSlowPath);
    }
    
    return result;
}
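Where Example #10 normalized and re-walked an IntendedStructureChain, this version compresses the same requirement into an ObjectPropertyConditionSet: roughly, one "setter miss" condition per prototype. A sketch of that idea over stand-in types (not JSC's real condition machinery):

#include <string>
#include <unordered_set>
#include <vector>

struct PrototypeModel {
    std::unordered_set<std::string> ownProperties;
};

// The transition stays cacheable only while no prototype could intercept
// the store, i.e. uid is absent from every object on the chain.
bool setterMissConditionsHold(const std::vector<const PrototypeModel*>& chain, const std::string& uid)
{
    for (const PrototypeModel* proto : chain) {
        if (proto->ownProperties.count(uid))
            return false;  // condition violated: this prototype defines uid
    }
    return true;
}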