Code example #1
JITByIdGenerator::JITByIdGenerator(
    CodeBlock* codeBlock, CodeOrigin codeOrigin, CallSiteIndex callSite, AccessType accessType,
    const RegisterSet& usedRegisters, JSValueRegs base, JSValueRegs value)
    : JITInlineCacheGenerator(codeBlock, codeOrigin, callSite, accessType)
    , m_base(base)
    , m_value(value)
{
    m_stubInfo->patch.usedRegisters = usedRegisters;
    
    m_stubInfo->patch.baseGPR = static_cast<int8_t>(base.payloadGPR());
    m_stubInfo->patch.valueGPR = static_cast<int8_t>(value.payloadGPR());
#if USE(JSVALUE32_64)
    m_stubInfo->patch.baseTagGPR = static_cast<int8_t>(base.tagGPR());
    m_stubInfo->patch.valueTagGPR = static_cast<int8_t>(value.tagGPR());
#endif
}
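The #if USE(JSVALUE32_64) branches above exist because JSValueRegs abstracts over the two JSValue encodings. The following is a simplified sketch of that abstraction (an illustration, not the actual GPRInfo.h definition): on 64-bit builds a boxed value lives in a single GPR, while on 32-bit builds it is a tag/payload register pair.

#if USE(JSVALUE64)
class JSValueRegs {
public:
    explicit JSValueRegs(GPRReg gpr) : m_gpr(gpr) { }
    GPRReg payloadGPR() const { return m_gpr; }        // the whole 64-bit boxed value
    GPRReg tagGPR() const { return InvalidGPRReg; }    // no separate tag register
private:
    GPRReg m_gpr;
};
#else // USE(JSVALUE32_64)
class JSValueRegs {
public:
    JSValueRegs(GPRReg tagGPR, GPRReg payloadGPR) : m_tagGPR(tagGPR), m_payloadGPR(payloadGPR) { }
    GPRReg payloadGPR() const { return m_payloadGPR; } // low word: payload
    GPRReg tagGPR() const { return m_tagGPR; }         // high word: type tag
private:
    GPRReg m_tagGPR;
    GPRReg m_payloadGPR;
};
#endif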
Code example #2
JITGetByIdGenerator::JITGetByIdGenerator(
    CodeBlock* codeBlock, CodeOrigin codeOrigin, CallSiteIndex callSite, const RegisterSet& usedRegisters,
    JSValueRegs base, JSValueRegs value, SpillRegistersMode spillMode)
    : JITByIdGenerator(
        codeBlock, codeOrigin, callSite, AccessType::Get, usedRegisters, base, value, spillMode)
{
    RELEASE_ASSERT(base.payloadGPR() != value.tagGPR());
}
Code example #3
JITGetByIdGenerator::JITGetByIdGenerator(
    CodeBlock* codeBlock, CodeOrigin codeOrigin, CallSiteIndex callSite, const RegisterSet& usedRegisters,
    UniquedStringImpl* propertyName, JSValueRegs base, JSValueRegs value, AccessType accessType)
    : JITByIdGenerator(codeBlock, codeOrigin, callSite, accessType, usedRegisters, base, value)
    , m_isLengthAccess(propertyName == codeBlock->vm()->propertyNames->length.impl())
{
    RELEASE_ASSERT(base.payloadGPR() != value.tagGPR());
}
Code example #4
JITByIdGenerator::JITByIdGenerator(
    CodeBlock* codeBlock, CodeOrigin codeOrigin, const RegisterSet& usedRegisters,
    JSValueRegs base, JSValueRegs value, bool registersFlushed)
    : JITInlineCacheGenerator(codeBlock, codeOrigin)
    , m_base(base)
    , m_value(value)
{
    m_stubInfo->patch.registersFlushed = registersFlushed;
    m_stubInfo->patch.usedRegisters = usedRegisters;
    
    // This is a convenience - in cases where the only registers you're using are base/value,
    // it allows you to pass RegisterSet() as the usedRegisters argument.
    m_stubInfo->patch.usedRegisters.set(base);
    m_stubInfo->patch.usedRegisters.set(value);
    
    m_stubInfo->patch.baseGPR = static_cast<int8_t>(base.payloadGPR());
    m_stubInfo->patch.valueGPR = static_cast<int8_t>(value.payloadGPR());
#if USE(JSVALUE32_64)
    m_stubInfo->patch.valueTagGPR = static_cast<int8_t>(value.tagGPR());
#endif
}
Code example #5
JITByIdGenerator::JITByIdGenerator(
    CodeBlock* codeBlock, CodeOrigin codeOrigin, CallSiteIndex callSite, AccessType accessType,
    const RegisterSet& usedRegisters, JSValueRegs base, JSValueRegs value,
    SpillRegistersMode spillMode)
    : JITInlineCacheGenerator(codeBlock, codeOrigin, callSite, accessType)
    , m_base(base)
    , m_value(value)
{
    m_stubInfo->patch.spillMode = spillMode;
    m_stubInfo->patch.usedRegisters = usedRegisters;
    
    // This is a convenience - in cases where the only registers you're using are base/value,
    // it allows you to pass RegisterSet() as the usedRegisters argument.
    m_stubInfo->patch.usedRegisters.set(base);
    m_stubInfo->patch.usedRegisters.set(value);
    
    m_stubInfo->patch.baseGPR = static_cast<int8_t>(base.payloadGPR());
    m_stubInfo->patch.valueGPR = static_cast<int8_t>(value.payloadGPR());
#if USE(JSVALUE32_64)
    m_stubInfo->patch.valueTagGPR = static_cast<int8_t>(value.tagGPR());
#endif
}
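As the comment in this constructor notes, a caller whose only live registers are base and value can pass an empty RegisterSet. A hypothetical Baseline-style construction of the JITGetByIdGenerator from code example #2, which forwards here, might look like the sketch below; the specific registers, NeedToSpill, and the surrounding m_codeBlock/m_bytecodeOffset variables are assumptions for illustration, written for a 64-bit build where JSValueRegs wraps a single GPR.

JITGetByIdGenerator gen(
    m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset),
    RegisterSet(),                 // usedRegisters: base/value are added by the constructor itself
    JSValueRegs(GPRInfo::regT0),   // base
    JSValueRegs(GPRInfo::regT0),   // value; may alias base, as the Baseline JIT does
    NeedToSpill);                  // SpillRegistersMode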
Code example #6
AssemblyHelpers::JumpList AssemblyHelpers::branchIfNotType(
    JSValueRegs regs, GPRReg tempGPR, const InferredType::Descriptor& descriptor, TagRegistersMode mode)
{
    AssemblyHelpers::JumpList result;

    switch (descriptor.kind()) {
    case InferredType::Bottom:
        result.append(jump());
        break;

    case InferredType::Boolean:
        result.append(branchIfNotBoolean(regs, tempGPR));
        break;

    case InferredType::Other:
        result.append(branchIfNotOther(regs, tempGPR));
        break;

    case InferredType::Int32:
        result.append(branchIfNotInt32(regs, mode));
        break;

    case InferredType::Number:
        result.append(branchIfNotNumber(regs, tempGPR, mode));
        break;

    case InferredType::String:
        result.append(branchIfNotCell(regs, mode));
        result.append(branchIfNotString(regs.payloadGPR()));
        break;

    case InferredType::ObjectWithStructure:
        result.append(branchIfNotCell(regs, mode));
        result.append(
            branchStructure(
                NotEqual,
                Address(regs.payloadGPR(), JSCell::structureIDOffset()),
                descriptor.structure()));
        break;

    case InferredType::ObjectWithStructureOrOther: {
        Jump ok = branchIfOther(regs, tempGPR);
        result.append(branchIfNotCell(regs, mode));
        result.append(
            branchStructure(
                NotEqual,
                Address(regs.payloadGPR(), JSCell::structureIDOffset()),
                descriptor.structure()));
        ok.link(this);
        break;
    }

    case InferredType::Object:
        result.append(branchIfNotCell(regs, mode));
        result.append(branchIfNotObject(regs.payloadGPR()));
        break;

    case InferredType::ObjectOrOther: {
        Jump ok = branchIfOther(regs, tempGPR);
        result.append(branchIfNotCell(regs, mode));
        result.append(branchIfNotObject(regs.payloadGPR()));
        ok.link(this);
        break;
    }

    case InferredType::Top:
        break;
    }

    return result;
}
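A caller typically treats the returned JumpList as the set of type-check failures and branches them to a slow path. A hypothetical usage sketch follows; jit, valueRegs, scratchGPR, and descriptor are assumed to be in scope and are not taken from the code above.

AssemblyHelpers::JumpList typeCheckFailed =
    jit.branchIfNotType(valueRegs, scratchGPR, descriptor, AssemblyHelpers::HaveTagRegisters);
// ... fast path that may rely on the inferred type ...
AssemblyHelpers::Jump done = jit.jump();
typeCheckFailed.link(&jit);
// ... slow path (e.g. OSR exit or generic handling) ...
done.link(&jit);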
Code example #7
void GetterSetterAccessCase::emitDOMJITGetter(AccessGenerationState& state, const DOMJIT::GetterSetter* domJIT, GPRReg baseForGetGPR)
{
    CCallHelpers& jit = *state.jit;
    StructureStubInfo& stubInfo = *state.stubInfo;
    JSValueRegs valueRegs = state.valueRegs;
    GPRReg baseGPR = state.baseGPR;
    GPRReg scratchGPR = state.scratchGPR;

    // We construct the environment that can execute the DOMJIT::Snippet here.
    Ref<DOMJIT::CallDOMGetterSnippet> snippet = domJIT->compiler()();

    Vector<GPRReg> gpScratch;
    Vector<FPRReg> fpScratch;
    Vector<SnippetParams::Value> regs;

    ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
    allocator.lock(baseGPR);
#if USE(JSVALUE32_64)
    allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR));
#endif
    allocator.lock(valueRegs);
    allocator.lock(scratchGPR);

    GPRReg paramBaseGPR = InvalidGPRReg;
    GPRReg paramGlobalObjectGPR = InvalidGPRReg;
    JSValueRegs paramValueRegs = valueRegs;
    GPRReg remainingScratchGPR = InvalidGPRReg;

    // valueRegs and baseForGetGPR may be the same. For example, the Baseline JIT passes the same regT0
    // for both baseGPR and valueRegs, and in the FTL there is no constraint that makes baseForGetGPR
    // interfere with the result, so the two may share a register. To keep the Snippet implementation
    // simple, Snippet assumes that result registers always early-interfere with input registers, in this
    // case baseForGetGPR. So we move baseForGetGPR into another register if baseForGetGPR == valueRegs.
    if (baseForGetGPR != valueRegs.payloadGPR()) {
        paramBaseGPR = baseForGetGPR;
        if (!snippet->requireGlobalObject)
            remainingScratchGPR = scratchGPR;
        else
            paramGlobalObjectGPR = scratchGPR;
    } else {
        jit.move(valueRegs.payloadGPR(), scratchGPR);
        paramBaseGPR = scratchGPR;
        if (snippet->requireGlobalObject)
            paramGlobalObjectGPR = allocator.allocateScratchGPR();
    }

    JSGlobalObject* globalObjectForDOMJIT = structure()->globalObject();

    regs.append(paramValueRegs);
    regs.append(paramBaseGPR);
    if (snippet->requireGlobalObject) {
        ASSERT(paramGlobalObjectGPR != InvalidGPRReg);
        regs.append(SnippetParams::Value(paramGlobalObjectGPR, globalObjectForDOMJIT));
    }

    if (snippet->numGPScratchRegisters) {
        unsigned i = 0;
        if (remainingScratchGPR != InvalidGPRReg) {
            gpScratch.append(remainingScratchGPR);
            ++i;
        }
        for (; i < snippet->numGPScratchRegisters; ++i)
            gpScratch.append(allocator.allocateScratchGPR());
    }

    for (unsigned i = 0; i < snippet->numFPScratchRegisters; ++i)
        fpScratch.append(allocator.allocateScratchFPR());

    // Store the reused registers to the stack. After that, we are free to use the allocated scratch registers.
    ScratchRegisterAllocator::PreservedState preservedState =
        allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::SpaceForCCall);

    if (verbose) {
        dataLog("baseGPR = ", baseGPR, "\n");
        dataLog("valueRegs = ", valueRegs, "\n");
        dataLog("scratchGPR = ", scratchGPR, "\n");
        dataLog("paramBaseGPR = ", paramBaseGPR, "\n");
        if (paramGlobalObjectGPR != InvalidGPRReg)
            dataLog("paramGlobalObjectGPR = ", paramGlobalObjectGPR, "\n");
        dataLog("paramValueRegs = ", paramValueRegs, "\n");
        for (unsigned i = 0; i < snippet->numGPScratchRegisters; ++i)
            dataLog("gpScratch[", i, "] = ", gpScratch[i], "\n");
    }

    if (snippet->requireGlobalObject)
        jit.move(CCallHelpers::TrustedImmPtr(globalObjectForDOMJIT), paramGlobalObjectGPR);

    // We only spill the registers used by the Snippet here. Registers that are not spilled here explicitly
    // must be in the used-register set passed in by the caller (Baseline, DFG, or FTL) if they need to be
    // preserved. Some registers can be locked without being in the used-register set. For example, the
    // caller could make baseGPR the same as valueRegs and leave it out of the used registers, since it
    // will be overwritten anyway.
    RegisterSet registersToSpillForCCall;
    for (auto& value : regs) {
        SnippetReg reg = value.reg();
        if (reg.isJSValueRegs())
            registersToSpillForCCall.set(reg.jsValueRegs());
        else if (reg.isGPR())
            registersToSpillForCCall.set(reg.gpr());
        else
            registersToSpillForCCall.set(reg.fpr());
    }
    for (GPRReg reg : gpScratch)
        registersToSpillForCCall.set(reg);
    for (FPRReg reg : fpScratch)
        registersToSpillForCCall.set(reg);
    registersToSpillForCCall.exclude(RegisterSet::registersToNotSaveForCCall());

    AccessCaseSnippetParams params(state.m_vm, WTFMove(regs), WTFMove(gpScratch), WTFMove(fpScratch));
    snippet->generator()->run(jit, params);
    allocator.restoreReusedRegistersByPopping(jit, preservedState);
    state.succeed();

    CCallHelpers::JumpList exceptions = params.emitSlowPathCalls(state, registersToSpillForCCall, jit);
    if (!exceptions.empty()) {
        exceptions.link(&jit);
        allocator.restoreReusedRegistersByPopping(jit, preservedState);
        state.emitExplicitExceptionHandler();
    }
}
Code example #8
void ScratchRegisterAllocator::lock(JSValueRegs regs)
{
    lock(regs.tagGPR());
    lock(regs.payloadGPR());
}
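On 64-bit builds regs.tagGPR() is InvalidGPRReg, so this overload relies on the single-register lock() treating InvalidGPRReg as a no-op. A plausible sketch of that overload (the real implementation may differ in detail):

void ScratchRegisterAllocator::lock(GPRReg reg)
{
    if (reg == InvalidGPRReg)
        return; // e.g. tagGPR() on a JSVALUE64 build
    unsigned index = GPRInfo::toIndex(reg);
    if (index == GPRInfo::InvalidIndex)
        return; // not an allocatable register
    m_lockedRegisters.setGPRByIndex(index); // mark as unavailable for scratch allocation
}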
Code example #9
void CallFrameShuffler::emitLoad(CachedRecovery& location)
{
    if (!location.recovery().isInJSStack())
        return;

    if (verbose)
        dataLog("   * Loading ", location.recovery(), " into ");
    VirtualRegister reg { location.recovery().virtualRegister() };
    MacroAssembler::Address address { addressForOld(reg) };

    bool tryFPR { true };
    JSValueRegs wantedJSValueRegs { location.wantedJSValueRegs() };
    if (wantedJSValueRegs) {
        if (wantedJSValueRegs.payloadGPR() != InvalidGPRReg
            && !m_registers[wantedJSValueRegs.payloadGPR()]
            && !m_lockedRegisters.get(wantedJSValueRegs.payloadGPR()))
            tryFPR = false;
        if (wantedJSValueRegs.tagGPR() != InvalidGPRReg
            && !m_registers[wantedJSValueRegs.tagGPR()]
            && !m_lockedRegisters.get(wantedJSValueRegs.tagGPR()))
            tryFPR = false;
    }

    if (tryFPR && location.loadsIntoFPR()) {
        FPRReg resultFPR = location.wantedFPR();
        if (resultFPR == InvalidFPRReg || m_registers[resultFPR] || m_lockedRegisters.get(resultFPR))
            resultFPR = getFreeFPR();
        if (resultFPR != InvalidFPRReg) {
            m_jit.loadDouble(address, resultFPR);
            DataFormat dataFormat = DataFormatJS;
            if (location.recovery().dataFormat() == DataFormatDouble)
                dataFormat = DataFormatDouble;
            updateRecovery(location, 
                ValueRecovery::inFPR(resultFPR, dataFormat));
            if (verbose)
                dataLog(location.recovery(), "\n");
            if (reg == newAsOld(dangerFrontier()))
                updateDangerFrontier();
            return;
        }
    }

    if (location.loadsIntoGPR()) {
        GPRReg resultGPR { wantedJSValueRegs.payloadGPR() };
        if (resultGPR == InvalidGPRReg || m_registers[resultGPR] || m_lockedRegisters.get(resultGPR))
            resultGPR = getFreeGPR();
        ASSERT(resultGPR != InvalidGPRReg);
        m_jit.loadPtr(address.withOffset(PayloadOffset), resultGPR);
        updateRecovery(location, 
            ValueRecovery::inGPR(resultGPR, location.recovery().dataFormat()));
        if (verbose)
            dataLog(location.recovery(), "\n");
        if (reg == newAsOld(dangerFrontier()))
            updateDangerFrontier();
        return;
    }

    ASSERT(location.recovery().technique() == DisplacedInJSStack);
    GPRReg payloadGPR { wantedJSValueRegs.payloadGPR() };
    GPRReg tagGPR { wantedJSValueRegs.tagGPR() };
    if (payloadGPR == InvalidGPRReg || m_registers[payloadGPR] || m_lockedRegisters.get(payloadGPR))
        payloadGPR = getFreeGPR();
    m_lockedRegisters.set(payloadGPR);
    if (tagGPR == InvalidGPRReg || m_registers[tagGPR] || m_lockedRegisters.get(tagGPR))
        tagGPR = getFreeGPR();
    m_lockedRegisters.clear(payloadGPR);
    ASSERT(payloadGPR != InvalidGPRReg && tagGPR != InvalidGPRReg && tagGPR != payloadGPR);
    m_jit.loadPtr(address.withOffset(PayloadOffset), payloadGPR);
    m_jit.loadPtr(address.withOffset(TagOffset), tagGPR);
    updateRecovery(location, 
        ValueRecovery::inPair(tagGPR, payloadGPR));
    if (verbose)
        dataLog(location.recovery(), "\n");
    if (reg == newAsOld(dangerFrontier()))
        updateDangerFrontier();
}
Code example #10
void CallFrameShuffler::emitDisplace(CachedRecovery& location)
{
    ASSERT(location.recovery().isInRegisters());
    JSValueRegs wantedJSValueRegs { location.wantedJSValueRegs() };
    ASSERT(wantedJSValueRegs); // We don't support wanted FPRs on 32bit platforms

    GPRReg wantedTagGPR { wantedJSValueRegs.tagGPR() };
    GPRReg wantedPayloadGPR { wantedJSValueRegs.payloadGPR() };
    
    if (wantedTagGPR != InvalidGPRReg) {
        ASSERT(!m_lockedRegisters.get(wantedTagGPR));
        if (CachedRecovery* currentTag { m_registers[wantedTagGPR] }) {
            if (currentTag == &location) {
                if (verbose)
                    dataLog("   + ", wantedTagGPR, " is OK\n");
            } else {
                // This can never happen on 32bit platforms since we
                // have at most one wanted JSValueRegs, for the
                // callee, and no callee-save registers.
                RELEASE_ASSERT_NOT_REACHED();
            }
        }
    }

    if (wantedPayloadGPR != InvalidGPRReg) {
        ASSERT(!m_lockedRegisters.get(wantedPayloadGPR));
        if (CachedRecovery* currentPayload { m_registers[wantedPayloadGPR] }) {
            if (currentPayload == &location) {
                if (verbose)
                    dataLog("   + ", wantedPayloadGPR, " is OK\n");
            } else {
                // See above
                RELEASE_ASSERT_NOT_REACHED();
            }
        }
    }

    if (location.recovery().technique() == InPair
        || location.recovery().isInGPR()) {
        GPRReg payloadGPR;
        if (location.recovery().technique() == InPair)
            payloadGPR = location.recovery().payloadGPR();
        else
            payloadGPR = location.recovery().gpr();

        if (wantedPayloadGPR == InvalidGPRReg)
            wantedPayloadGPR = payloadGPR;

        if (payloadGPR != wantedPayloadGPR) {
            if (location.recovery().technique() == InPair
                && wantedPayloadGPR == location.recovery().tagGPR()) {
                if (verbose)
                    dataLog("   * Swapping ", payloadGPR, " and ", wantedPayloadGPR, "\n");
                m_jit.swap(payloadGPR, wantedPayloadGPR);
                updateRecovery(location, 
                    ValueRecovery::inPair(payloadGPR, wantedPayloadGPR));
            } else {
                if (verbose)
                    dataLog("   * Moving ", payloadGPR, " into ", wantedPayloadGPR, "\n");
                m_jit.move(payloadGPR, wantedPayloadGPR);
                if (location.recovery().technique() == InPair) {
                    updateRecovery(location,
                        ValueRecovery::inPair(location.recovery().tagGPR(),
                            wantedPayloadGPR));
                } else {
                    updateRecovery(location, 
                        ValueRecovery::inGPR(wantedPayloadGPR, location.recovery().dataFormat()));
                }
            }
        }

        if (wantedTagGPR == InvalidGPRReg)
            wantedTagGPR = getFreeGPR();
        switch (location.recovery().dataFormat()) {
        case DataFormatInt32:
            if (verbose)
                dataLog("   * Moving int32 tag into ", wantedTagGPR, "\n");
            m_jit.move(MacroAssembler::TrustedImm32(JSValue::Int32Tag),
                wantedTagGPR);
            break;
        case DataFormatCell:
            if (verbose)
                dataLog("   * Moving cell tag into ", wantedTagGPR, "\n");
            m_jit.move(MacroAssembler::TrustedImm32(JSValue::CellTag),
                wantedTagGPR);
            break;
        case DataFormatBoolean:
            if (verbose)
                dataLog("   * Moving boolean tag into ", wantedTagGPR, "\n");
            m_jit.move(MacroAssembler::TrustedImm32(JSValue::BooleanTag),
                wantedTagGPR);
            break;
        case DataFormatJS:
            ASSERT(wantedTagGPR != location.recovery().payloadGPR());
            if (wantedTagGPR != location.recovery().tagGPR()) {
                if (verbose)
                    dataLog("   * Moving ", location.recovery().tagGPR(), " into ", wantedTagGPR, "\n");
                m_jit.move(location.recovery().tagGPR(), wantedTagGPR);
            }
            break;

        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    } else {
        ASSERT(location.recovery().isInFPR());
        if (wantedTagGPR == InvalidGPRReg) {
            ASSERT(wantedPayloadGPR != InvalidGPRReg);
            m_lockedRegisters.set(wantedPayloadGPR);
            wantedTagGPR = getFreeGPR();
            m_lockedRegisters.clear(wantedPayloadGPR);
        }
        if (wantedPayloadGPR == InvalidGPRReg) {
            m_lockedRegisters.set(wantedTagGPR);
            wantedPayloadGPR = getFreeGPR();
            m_lockedRegisters.clear(wantedTagGPR);
        }
        m_jit.boxDouble(location.recovery().fpr(), wantedTagGPR, wantedPayloadGPR);
    }
    updateRecovery(location, ValueRecovery::inPair(wantedTagGPR, wantedPayloadGPR));
}
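The InFPR branch above ends with boxDouble, which on 32-bit builds needs both a tag and a payload GPR; that is why the code frees up two GPRs first. A rough sketch of what boxDouble amounts to there (an assumption about the helper, not code taken from the examples above):

void boxDouble(FPRReg fpr, GPRReg tagGPR, GPRReg payloadGPR)
{
    // Split the double's raw 64 bits across the pair: low word into the payload GPR,
    // high word into the tag GPR, matching the JSVALUE32_64 encoding of a double JSValue.
    moveDoubleToInts(fpr, payloadGPR, tagGPR);
}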
Code example #11
void AccessCase::emitIntrinsicGetter(AccessGenerationState& state)
{
    CCallHelpers& jit = *state.jit;
    JSValueRegs valueRegs = state.valueRegs;
    GPRReg baseGPR = state.baseGPR;
    GPRReg valueGPR = valueRegs.payloadGPR();

    switch (intrinsic()) {
    case TypedArrayLengthIntrinsic: {
        jit.load32(MacroAssembler::Address(state.baseGPR, JSArrayBufferView::offsetOfLength()), valueGPR);
        jit.boxInt32(valueGPR, valueRegs, CCallHelpers::DoNotHaveTagRegisters);
        state.succeed();
        return;
    }

    case TypedArrayByteLengthIntrinsic: {
        TypedArrayType type = structure()->classInfo()->typedArrayStorageType;

        jit.load32(MacroAssembler::Address(state.baseGPR, JSArrayBufferView::offsetOfLength()), valueGPR);

        if (elementSize(type) > 1) {
            // We can use a bit shift here since a TypedArray's byteLength cannot overflow an int32.
            jit.lshift32(valueGPR, Imm32(logElementSize(type)), valueGPR);
        }

        jit.boxInt32(valueGPR, valueRegs, CCallHelpers::DoNotHaveTagRegisters);
        state.succeed();
        return;
    }

    case TypedArrayByteOffsetIntrinsic: {
        GPRReg scratchGPR = state.scratchGPR;

        CCallHelpers::Jump emptyByteOffset = jit.branch32(
            MacroAssembler::NotEqual,
            MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfMode()),
            TrustedImm32(WastefulTypedArray));

        jit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
        jit.loadPtr(MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfVector()), valueGPR);
        jit.loadPtr(MacroAssembler::Address(scratchGPR, Butterfly::offsetOfArrayBuffer()), scratchGPR);
        jit.loadPtr(MacroAssembler::Address(scratchGPR, ArrayBuffer::offsetOfData()), scratchGPR);
        jit.subPtr(scratchGPR, valueGPR);

        CCallHelpers::Jump done = jit.jump();
        
        emptyByteOffset.link(&jit);
        jit.move(TrustedImmPtr(0), valueGPR);
        
        done.link(&jit);
        
        jit.boxInt32(valueGPR, valueRegs, CCallHelpers::DoNotHaveTagRegisters);
        state.succeed();
        return;
    }

    default:
        break;
    }
    RELEASE_ASSERT_NOT_REACHED();
}
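For reference, the TypedArrayByteOffsetIntrinsic fast path above computes the same thing as the following plain C++ (a conceptual sketch, not JSC API): for a wasteful (ArrayBuffer-backed) view, the byte offset is the distance from the buffer's data pointer to the view's vector; other view modes report 0 on this path.

size_t byteOffsetSketch(char* vector, char* bufferData, bool isWasteful)
{
    if (!isWasteful)
        return 0;                                        // emptyByteOffset branch: no backing ArrayBuffer
    return static_cast<size_t>(vector - bufferData);     // the subPtr(scratchGPR, valueGPR) above
}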