std::unique_ptr<PolyProtoAccessChain> PolyProtoAccessChain::create(JSGlobalObject* globalObject, JSCell* base, JSObject* target, bool& usesPolyProto)
{
    JSCell* current = base;
    VM& vm = *base->vm();

    bool found = false;

    usesPolyProto = false;

    std::unique_ptr<PolyProtoAccessChain> result(new PolyProtoAccessChain());

    for (unsigned iterationNumber = 0; true; ++iterationNumber) {
        Structure* structure = current->structure(vm);

        if (!structure->propertyAccessesAreCacheable())
            return nullptr;

        if (structure->isProxy())
            return nullptr;

        if (structure->isDictionary()) {
            ASSERT(structure->isObject());
            if (structure->hasBeenFlattenedBefore())
                return nullptr;

            structure->flattenDictionaryStructure(vm, asObject(current));
        }

        // To save memory, we don't include the base in the chain. We let
        // AccessCase provide the base to us as needed.
        if (iterationNumber)
            result->m_chain.append(structure);
        else
            RELEASE_ASSERT(current == base);

        if (current == target) {
            found = true;
            break;
        }

        // We only have poly proto if we need to access our prototype via
        // the poly proto protocol. If the slot base is the only poly proto
        // thing in the chain, and we have a cache hit on it, then we're not
        // poly proto.
        usesPolyProto |= structure->hasPolyProto();

        JSValue prototype = structure->prototypeForLookup(globalObject, current);
        if (prototype.isNull())
            break;
        current = asObject(prototype);
    }

    if (!found && !!target)
        return nullptr;

    return result;
}
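
// The walk above, modeled in isolation: a minimal, self-contained sketch with
// stand-in types (Obj and buildChain are illustrative names, far simpler than
// the real Structure/JSObject machinery). The real code also accepts a null
// target, meaning "walk to the end of the prototype chain"; that case is
// omitted here.
#include <optional>
#include <vector>

struct Obj {
    Obj* prototype { nullptr }; // models Structure::prototypeForLookup()
    bool cacheable { true };    // models propertyAccessesAreCacheable()
};

// Collect the prototypes between base (exclusive, matching the memory-saving
// choice in create()) and target (inclusive); nullopt where create() returns
// nullptr.
std::optional<std::vector<Obj*>> buildChain(Obj* base, Obj* target)
{
    std::vector<Obj*> chain;
    for (Obj* current = base; current; current = current->prototype) {
        if (!current->cacheable)
            return std::nullopt;
        if (current != base)
            chain.push_back(current);
        if (current == target)
            return chain;
    }
    return std::nullopt; // fell off the chain without reaching target
}
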
void IntendedStructureChain::gatherChecks(ConstantStructureCheckVector& vector) const
{
    // Record one (prototype object, expected Structure) pair per link of the
    // chain, so the caller can emit a constant-structure check for each link.
    JSValue currentPrototype = m_prototype;
    for (unsigned i = 0; i < size(); ++i) {
        JSObject* currentObject = asObject(currentPrototype);
        Structure* currentStructure = at(i);
        vector.append(ConstantStructureCheck(currentObject, currentStructure));
        currentPrototype = currentStructure->prototypeForLookup(m_globalObject);
    }
}
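
// A sketch of how the gathered checks would be consumed at validation time.
// Hedged: this assumes ConstantStructureCheck exposes constant()/structure()
// accessors for the pair appended above; checksStillHold is an illustrative
// helper, not part of the class. The guarded access stays valid only while
// every prototype on the chain keeps its recorded Structure.
static bool checksStillHold(const ConstantStructureCheckVector& vector)
{
    for (unsigned i = 0; i < vector.size(); ++i) {
        if (vector[i].constant()->structure() != vector[i].structure())
            return false; // a prototype transitioned; the cached chain is stale
    }
    return true;
}
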
void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, CallFrame* callFrame)
{
    ASSERT(count);
    
    Vector<JmpSrc> bucketsOfFail;

    // Check that eax holds an object with the expected Structure.
    JmpSrc baseObjectCheck = checkStructure(X86::eax, structure);
    bucketsOfFail.append(baseObjectCheck);

    Structure* currStructure = structure;
    RefPtr<Structure>* chainEntries = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = chainEntries[i].get();

        // Check that the prototype object's Structure has not changed.
        Structure** prototypeStructureAddress = &(protoObject->m_structure);
        __ cmpl_im(reinterpret_cast<uint32_t>(currStructure), prototypeStructureAddress);
        bucketsOfFail.append(__ jne());
    }
    ASSERT(protoObject);

    PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
    __ movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
    __ movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
    JmpSrc success = __ jmp();

    void* code = __ executableCopy(m_codeBlock->executablePool());

    // Link the failure cases to the most recently generated stub in the list;
    // its own failure path ultimately reaches the original slow case routine.
    void* lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;

    for (unsigned i = 0; i < bucketsOfFail.size(); ++i)
        X86Assembler::link(code, bucketsOfFail[i], lastProtoBegin);

    // On success, return to the hot patch code, at a point where it will perform the store to dest for us.
    intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
    X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));

    // Track the stub we have created so that it will be deleted later.
    structure->ref();
    chain->ref();
    prototypeStructures->list[currentIndex].set(code, structure, chain);

    // Finally repatch the jump to slow case back in the hot path to jump here instead.
    intptr_t jmpLocation = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
    X86Assembler::repatchBranchOffset(jmpLocation, code);
}
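
// What the stub emitted above does at run time, rendered as straight-line C++.
// This is a model only: stubModel and the Model* structs are illustrative, and
// the real failure path jumps to lastProtoBegin rather than calling a function
// pointer. Note that the prototype objects and their expected Structures are
// baked into the stub as constants at compile time; only the current
// m_structure pointers are read at run time.
#include <cstddef>

struct ModelStructure;
struct ModelObject {
    ModelStructure* m_structure;
    void** m_propertyStorage;
};

static void* stubModel(ModelObject* base, ModelStructure* expectedBase,
                       ModelObject* const* protos, ModelStructure* const* expected,
                       size_t count, size_t cachedOffset, void* (*fail)(ModelObject*))
{
    if (base->m_structure != expectedBase) // checkStructure + jne
        return fail(base);
    for (size_t i = 0; i < count; ++i) {
        if (protos[i]->m_structure != expected[i]) // cmpl_im + jne per link
            return fail(base);
    }
    // The two movl_mr's: load the property storage pointer, then the slot.
    return protos[count - 1]->m_propertyStorage[cachedOffset];
}
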
void GetByIdStatus::computeForChain(GetByIdStatus& result, CodeBlock* profiledBlock, Identifier& ident, Structure* structure)
{
#if ENABLE(JIT) && ENABLE(VALUE_PROFILER)
    // Validate the chain. If the chain is invalid, then currently the best thing
    // we can do is to assume that TakesSlow is true. In the future, it might be
    // worth exploring reifying the structure chain from the structure we've got
    // instead of using the one from the cache, since that will do the right things
    // if the structure chain has changed. But that may be harder, because we may
    // then end up having a different type of access altogether. And it currently
    // does not appear to be worth it to do so -- effectively, the heuristic we
    // have now is that if the structure chain has changed between when it was
    // cached in the baseline JIT and when the DFG tried to inline the access,
    // then we fall back on a polymorphic access.
    Structure* currentStructure = structure;
    JSObject* currentObject = 0;
    for (unsigned i = 0; i < result.m_chain.size(); ++i) {
        ASSERT(!currentStructure->isDictionary());
        currentObject = asObject(currentStructure->prototypeForLookup(profiledBlock));
        currentStructure = result.m_chain[i];
        if (currentObject->structure() != currentStructure)
            return;
    }
        
    ASSERT(currentObject);
        
    unsigned attributesIgnored;
    JSCell* specificValue;
        
    result.m_offset = currentStructure->get(
        *profiledBlock->vm(), ident, attributesIgnored, specificValue);
    if (currentStructure->isDictionary())
        specificValue = 0;
    if (!isValidOffset(result.m_offset))
        return;
        
    result.m_structureSet.add(structure);
    result.m_specificValue = JSValue(specificValue);
#else
    UNUSED_PARAM(result);
    UNUSED_PARAM(profiledBlock);
    UNUSED_PARAM(ident);
    UNUSED_PARAM(structure);
    UNREACHABLE_FOR_PLATFORM();
#endif
}
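
// The comment above mentions reifying the structure chain from the structure
// we've got instead of trusting the cached one. A hypothetical sketch of that
// alternative (reifyChain is an invented helper; the code above deliberately
// does not do this), using only calls already visible in this function:
static bool reifyChain(Structure* structure, CodeBlock* profiledBlock, Vector<Structure*>& result)
{
    Structure* current = structure;
    while (true) {
        JSValue prototype = current->prototypeForLookup(profiledBlock);
        if (prototype.isNull())
            return true; // reached the end of the chain
        current = asObject(prototype)->structure();
        if (current->isDictionary())
            return false; // uncacheable link; give up
        result.append(current);
    }
}
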
void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, void* returnAddress, CallFrame* callFrame)
{
#if USE(CTI_REPATCH_PIC)
    // We don't want to repatch more than once - in future go to cti_op_put_by_id_generic.
    ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_list));

    ASSERT(count);
    
    Vector<JmpSrc> bucketsOfFail;

    // Check that eax holds an object with the expected Structure.
    JmpSrc baseObjectCheck = checkStructure(X86::eax, structure);
    bucketsOfFail.append(baseObjectCheck);

    Structure* currStructure = structure;
    RefPtr<Structure>* chainEntries = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = chainEntries[i].get();

        // Check that the prototype object's Structure has not changed.
        Structure** prototypeStructureAddress = &(protoObject->m_structure);
        __ cmpl_im(reinterpret_cast<uint32_t>(currStructure), prototypeStructureAddress);
        bucketsOfFail.append(__ jne());
    }
    ASSERT(protoObject);

    PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
    __ movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
    __ movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
    JmpSrc success = __ jmp();

    void* code = __ executableCopy(m_codeBlock->executablePool());

    // Use the repatch information to link the failure cases back to the original slow case routine.
    void* slowCaseBegin = reinterpret_cast<char*>(stubInfo->callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;

    for (unsigned i = 0; i < bucketsOfFail.size(); ++i)
        X86Assembler::link(code, bucketsOfFail[i], slowCaseBegin);

    // On success, return to the hot patch code, at a point where it will perform the store to dest for us.
    intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
    X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));

    // Track the stub we have created so that it will be deleted later.
    stubInfo->stubRoutine = code;

    // Finally repatch the jump to slow case back in the hot path to jump here instead.
    intptr_t jmpLocation = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
    X86Assembler::repatchBranchOffset(jmpLocation, code);
#else
    ASSERT(count);
    
    Vector<JmpSrc> bucketsOfFail;

    // Check that eax is a cell (no tag bits set) and has the right Structure.
    __ testl_i32r(JSImmediate::TagMask, X86::eax);
    bucketsOfFail.append(__ jne());
    bucketsOfFail.append(checkStructure(X86::eax, structure));

    Structure* currStructure = structure;
    RefPtr<Structure>* chainEntries = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = chainEntries[i].get();

        // Check that the prototype object's Structure has not changed.
        Structure** prototypeStructureAddress = &(protoObject->m_structure);
        __ cmpl_im(reinterpret_cast<uint32_t>(currStructure), prototypeStructureAddress);
        bucketsOfFail.append(__ jne());
    }
    ASSERT(protoObject);

    PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
    __ movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
    __ movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
    __ ret();

    void* code = __ executableCopy(m_codeBlock->executablePool());

    for (unsigned i = 0; i < bucketsOfFail.size(); ++i)
        X86Assembler::link(code, bucketsOfFail[i], reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail));

    stubInfo->stubRoutine = code;

    ctiRepatchCallByReturnAddress(returnAddress, code);
#endif
}
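
// The extra testl_i32r(JSImmediate::TagMask, ...) in the non-PIC path above is
// an inline "is this a heap cell?" check: in the old 32-bit value encoding, any
// set tag bit marks an immediate (integer, boolean, ...), which has no
// Structure to compare against. Modeled as plain C++ (isCell is an
// illustrative name):
#include <cstdint>

static bool isCell(uint32_t encodedValue, uint32_t tagMask)
{
    // testl + jne in the stub: bail to the failure list when tag bits are set.
    return (encodedValue & tagMask) == 0;
}
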