Example #1
RegisterSet RegisterSet::webAssemblyCalleeSaveRegisters()
{
    RegisterSet result;
#if CPU(X86)
#elif CPU(X86_64)
#if !OS(WINDOWS)
    ASSERT(GPRInfo::regCS3 == GPRInfo::tagTypeNumberRegister);
    ASSERT(GPRInfo::regCS4 == GPRInfo::tagMaskRegister);
    result.set(GPRInfo::regCS3);
    result.set(GPRInfo::regCS4);
#else
    ASSERT(GPRInfo::regCS5 == GPRInfo::tagTypeNumberRegister);
    ASSERT(GPRInfo::regCS6 == GPRInfo::tagMaskRegister);
    result.set(GPRInfo::regCS5);
    result.set(GPRInfo::regCS6);
#endif
#elif CPU(ARM_THUMB2)
#elif CPU(ARM_TRADITIONAL)
#elif CPU(ARM64)
    ASSERT(GPRInfo::regCS8 == GPRInfo::tagTypeNumberRegister);
    ASSERT(GPRInfo::regCS9 == GPRInfo::tagMaskRegister);
    result.set(GPRInfo::regCS8);
    result.set(GPRInfo::regCS9);
#elif CPU(MIPS)
#elif CPU(SH4)
#else
    UNREACHABLE_FOR_PLATFORM();
#endif
    return result;
}
Example #2
RegisterSet RegisterSet::calleeSaveRegisters()
{
    RegisterSet result;
#if CPU(X86)
    result.set(X86Registers::ebx);
    result.set(X86Registers::ebp);
    result.set(X86Registers::edi);
    result.set(X86Registers::esi);
#elif CPU(X86_64)
    result.set(X86Registers::ebx);
    result.set(X86Registers::ebp);
    result.set(X86Registers::r12);
    result.set(X86Registers::r13);
    result.set(X86Registers::r14);
    result.set(X86Registers::r15);
#elif CPU(ARM_THUMB2)
    result.set(ARMRegisters::r4);
    result.set(ARMRegisters::r5);
    result.set(ARMRegisters::r6);
    result.set(ARMRegisters::r8);
#if !PLATFORM(IOS)
    result.set(ARMRegisters::r9);
#endif
    result.set(ARMRegisters::r10);
    result.set(ARMRegisters::r11);
#elif CPU(ARM_TRADITIONAL)
    result.set(ARMRegisters::r4);
    result.set(ARMRegisters::r5);
    result.set(ARMRegisters::r6);
    result.set(ARMRegisters::r7);
    result.set(ARMRegisters::r8);
    result.set(ARMRegisters::r9);
    result.set(ARMRegisters::r10);
    result.set(ARMRegisters::r11);
#elif CPU(ARM64)
    // We don't include LR in the set of callee-save registers even though it technically belongs
    // there. This is because we use this set to describe the set of registers that need to be saved
    // beyond what you would save by the platform-agnostic "preserve return address" and "restore
    // return address" operations in CCallHelpers.
    for (
        ARM64Registers::RegisterID reg = ARM64Registers::x19;
        reg <= ARM64Registers::x28;
        reg = static_cast<ARM64Registers::RegisterID>(reg + 1))
        result.set(reg);
    result.set(ARM64Registers::fp);
    for (
        ARM64Registers::FPRegisterID reg = ARM64Registers::q8;
        reg <= ARM64Registers::q15;
        reg = static_cast<ARM64Registers::FPRegisterID>(reg + 1))
        result.set(reg);
#elif CPU(MIPS)
#else
    UNREACHABLE_FOR_PLATFORM();
#endif
    return result;
}
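The ARM64 branch above fills the set by walking a contiguous range of register IDs, and its comment explains why lr is deliberately left out. Below is a minimal, self-contained sketch of the same idea; every name in it (Reg, RegSet, calleeSaves) is a hypothetical stand-in, not the real JSC type:

#include <cstdint>
#include <cstdio>

enum Reg : unsigned { x19, x20, x21, x22, x23, x24, x25, x26, x27, x28, fp, lr, numRegs };

struct RegSet {
    uint32_t bits = 0;
    void set(Reg r) { bits |= 1u << r; }
    bool get(Reg r) const { return bits & (1u << r); }
};

static RegSet calleeSaves()
{
    RegSet result;
    // Contiguous enum values allow a range loop instead of naming every
    // register, just like the x19..x28 and q8..q15 loops above.
    for (Reg r = x19; r <= x28; r = static_cast<Reg>(r + 1))
        result.set(r);
    result.set(fp);
    // lr is left out on purpose, mirroring the comment in the ARM64 branch:
    // return-address preservation is handled by a separate mechanism.
    return result;
}

int main()
{
    std::printf("lr saved? %d\n", calleeSaves().get(lr)); // prints "lr saved? 0"
    return 0;
}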
Example #3
static void setupLLInt(VM& vm, CodeBlock* codeBlock)
{
#if ENABLE(LLINT)
    LLInt::setEntrypoint(vm, codeBlock);
#else
    UNUSED_PARAM(vm);
    UNUSED_PARAM(codeBlock);
    UNREACHABLE_FOR_PLATFORM();
#endif
}
Example #4
static void setupJIT(VM& vm, CodeBlock* codeBlock)
{
#if ENABLE(JIT)
    CompilationResult result = JIT::compile(&vm, codeBlock, JITCompilationMustSucceed);
    RELEASE_ASSERT(result == CompilationSuccessful);
#else
    UNUSED_PARAM(vm);
    UNUSED_PARAM(codeBlock);
    UNREACHABLE_FOR_PLATFORM();
#endif
}
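Examples #3 and #4 show the same stub pattern: when the feature is compiled out, the parameters are marked as used so the build stays warning-free, and UNREACHABLE_FOR_PLATFORM documents that the stub must never actually run. A hedged sketch with stand-in macros (ENABLE_MY_FEATURE, MY_UNUSED, MY_UNREACHABLE are hypothetical, not the real WTF macros):

#include <cassert>
#include <cstdlib>

#define ENABLE_MY_FEATURE 0
#define MY_UNUSED(x) ((void)(x))
#define MY_UNREACHABLE() do { assert(!"unreachable in this configuration"); std::abort(); } while (0)

static void setupFeature(int& state)
{
#if ENABLE_MY_FEATURE
    state = 1; // the real work only exists when the feature is compiled in
#else
    MY_UNUSED(state);  // keep unused-parameter warnings quiet in the stub build
    MY_UNREACHABLE();  // callers are expected never to reach this configuration
#endif
}

int main()
{
    int state = 0;
    MY_UNUSED(state);
    // setupFeature(state); // would trap, since ENABLE_MY_FEATURE is 0 here
    return 0;
}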
Example #5
bool GetByIdStatus::computeForChain(CodeBlock* profiledBlock, StringImpl* uid, PassRefPtr<IntendedStructureChain> passedChain)
{
#if ENABLE(JIT)
    RefPtr<IntendedStructureChain> chain = passedChain;

    // Validate the chain. If the chain is invalid, then currently the best thing
    // we can do is to assume that TakesSlow is true. In the future, it might be
    // worth exploring reifying the structure chain from the structure we've got
    // instead of using the one from the cache, since that will do the right things
    // if the structure chain has changed. But that may be harder, because we may
    // then end up having a different type of access altogether. And it currently
    // does not appear to be worth it to do so -- effectively, the heuristic we
    // have now is that if the structure chain has changed between when it was
    // cached in the baseline JIT and when the DFG tried to inline the access,
    // then we fall back on a polymorphic access.
    if (!chain->isStillValid())
        return false;

    if (chain->head()->takesSlowPathInDFGForImpureProperty())
        return false;
    size_t chainSize = chain->size();
    for (size_t i = 0; i < chainSize; i++) {
        if (chain->at(i)->takesSlowPathInDFGForImpureProperty())
            return false;
    }

    JSObject* currentObject = chain->terminalPrototype();
    Structure* currentStructure = chain->last();

    ASSERT_UNUSED(currentObject, currentObject);

    unsigned attributesIgnored;
    JSCell* specificValue;

    PropertyOffset offset = currentStructure->getConcurrently(
                                *profiledBlock->vm(), uid, attributesIgnored, specificValue);
    if (currentStructure->isDictionary())
        specificValue = 0;
    if (!isValidOffset(offset))
        return false;

    m_variants.append(
        GetByIdVariant(StructureSet(chain->head()), offset, specificValue, chain));
    return true;
#else // ENABLE(JIT)
    UNUSED_PARAM(profiledBlock);
    UNUSED_PARAM(uid);
    UNUSED_PARAM(passedChain);
    UNREACHABLE_FOR_PLATFORM();
    return false;
#endif // ENABLE(JIT)
}
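The comment in this example describes a simple heuristic: if any link of the cached structure chain no longer matches the live object graph, give up and fall back to a slower, polymorphic access. A minimal sketch of that walk, using hypothetical stand-in types rather than the real JSC classes:

#include <vector>

struct Structure;
struct Object { Structure* structure; };
struct Structure { Object* prototype; };

// Returns false as soon as a link of the cached chain disagrees with what the
// objects actually point at, which is the cue to bail out to the slow path.
static bool chainStillMatches(Object* base, const std::vector<Structure*>& cachedChain)
{
    Structure* currentStructure = base->structure;
    for (Structure* expected : cachedChain) {
        Object* prototype = currentStructure->prototype;
        if (!prototype || prototype->structure != expected)
            return false;
        currentStructure = expected;
    }
    return true;
}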
Example #6
RegisterSet RegisterSet::calleeSaveRegisters()
{
    RegisterSet result;
#if CPU(X86_64)
    result.set(X86Registers::ebx);
    result.set(X86Registers::ebp);
    result.set(X86Registers::r10);
    result.set(X86Registers::r12);
    result.set(X86Registers::r13);
    result.set(X86Registers::r14);
    result.set(X86Registers::r15);
#else
    UNREACHABLE_FOR_PLATFORM();
#endif
    return result;
}
Example #7
extern "C" JSC::LLVMAPI* initializeAndGetJSCLLVMAPI(void (*callback)(const char*, ...))
{
    g_llvmTrapCallback = callback;
    
    LLVMInstallFatalErrorHandler(llvmCrash);

    if (!LLVMStartMultithreaded())
        callback("Could not start LLVM multithreading");
    
    LLVMLinkInMCJIT();
    
    // You think you want to call LLVMInitializeNativeTarget()? Think again. This presumes that
    // LLVM was ./configured correctly, which won't be the case in cross-compilation situations.
    
#if CPU(X86_64)
    LLVMInitializeX86TargetInfo();
    LLVMInitializeX86Target();
    LLVMInitializeX86TargetMC();
    LLVMInitializeX86AsmPrinter();
    LLVMInitializeX86Disassembler();
#elif CPU(ARM64)
    LLVMInitializeARM64TargetInfo();
    LLVMInitializeARM64Target();
    LLVMInitializeARM64TargetMC();
    LLVMInitializeARM64AsmPrinter();
    LLVMInitializeARM64Disassembler();
#else
    UNREACHABLE_FOR_PLATFORM();
#endif
    
    const char* args[] = {
        "llvmForJSC.dylib",
        "-enable-stackmap-liveness=true",
        "-enable-patchpoint-liveness=true"
    };
    llvm::cl::ParseCommandLineOptions(sizeof(args) / sizeof(const char*), args);
    
    JSC::LLVMAPI* result = new JSC::LLVMAPI;
    
#define LLVM_API_FUNCTION_ASSIGNMENT(returnType, name, signature) \
    result->name = LLVM##name;
    FOR_EACH_LLVM_API_FUNCTION(LLVM_API_FUNCTION_ASSIGNMENT);
#undef LLVM_API_FUNCTION_ASSIGNMENT
    
    return result;
}
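The LLVM_API_FUNCTION_ASSIGNMENT block at the end is an X-macro: a single list of (returnType, name, signature) entries is expanded once to declare the table of function pointers and once more to fill it in. A hedged, self-contained sketch of the same pattern; FOR_EACH_MY_API and the My* functions below are hypothetical stand-ins for FOR_EACH_LLVM_API_FUNCTION and the LLVM entry points:

static int MyAdd(int a, int b) { return a + b; }
static int MySub(int a, int b) { return a - b; }

// One list of (returnType, name, signature) entries...
#define FOR_EACH_MY_API(macro) \
    macro(int, Add, (int, int)) \
    macro(int, Sub, (int, int))

// ...expanded once to declare the table of function pointers...
struct MyAPI {
#define MY_API_FIELD(returnType, name, signature) returnType (*name) signature;
    FOR_EACH_MY_API(MY_API_FIELD)
#undef MY_API_FIELD
};

// ...and once more to assign each pointer, exactly as the
// LLVM_API_FUNCTION_ASSIGNMENT macro does in the example above.
static MyAPI makeAPI()
{
    MyAPI api;
#define MY_API_ASSIGN(returnType, name, signature) api.name = My##name;
    FOR_EACH_MY_API(MY_API_ASSIGN)
#undef MY_API_ASSIGN
    return api;
}

int main()
{
    MyAPI api = makeAPI();
    return api.Add(2, 2) == 4 ? 0 : 1;
}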
Example #8
void GetByIdStatus::computeForChain(GetByIdStatus& result, CodeBlock* profiledBlock, Identifier& ident, Structure* structure)
{
#if ENABLE(JIT) && ENABLE(VALUE_PROFILER)
    // Validate the chain. If the chain is invalid, then currently the best thing
    // we can do is to assume that TakesSlow is true. In the future, it might be
    // worth exploring reifying the structure chain from the structure we've got
    // instead of using the one from the cache, since that will do the right things
    // if the structure chain has changed. But that may be harder, because we may
    // then end up having a different type of access altogether. And it currently
    // does not appear to be worth it to do so -- effectively, the heuristic we
    // have now is that if the structure chain has changed between when it was
    // cached in the baseline JIT and when the DFG tried to inline the access,
    // then we fall back on a polymorphic access.
    Structure* currentStructure = structure;
    JSObject* currentObject = 0;
    for (unsigned i = 0; i < result.m_chain.size(); ++i) {
        ASSERT(!currentStructure->isDictionary());
        currentObject = asObject(currentStructure->prototypeForLookup(profiledBlock));
        currentStructure = result.m_chain[i];
        if (currentObject->structure() != currentStructure)
            return;
    }
        
    ASSERT(currentObject);
        
    unsigned attributesIgnored;
    JSCell* specificValue;
        
    result.m_offset = currentStructure->get(
        *profiledBlock->vm(), ident, attributesIgnored, specificValue);
    if (currentStructure->isDictionary())
        specificValue = 0;
    if (!isValidOffset(result.m_offset))
        return;
        
    result.m_structureSet.add(structure);
    result.m_specificValue = JSValue(specificValue);
#else
    UNUSED_PARAM(result);
    UNUSED_PARAM(profiledBlock);
    UNUSED_PARAM(ident);
    UNUSED_PARAM(structure);
    UNREACHABLE_FOR_PLATFORM();
#endif
}
Example #9
RegisterSet RegisterSet::ftlCalleeSaveRegisters()
{
    RegisterSet result;
#if ENABLE(FTL_JIT)
#if CPU(X86_64) && !OS(WINDOWS)
    result.set(GPRInfo::regCS0);
    result.set(GPRInfo::regCS1);
    result.set(GPRInfo::regCS2);
    ASSERT(GPRInfo::regCS3 == GPRInfo::tagTypeNumberRegister);
    ASSERT(GPRInfo::regCS4 == GPRInfo::tagMaskRegister);
    result.set(GPRInfo::regCS3);
    result.set(GPRInfo::regCS4);
#elif CPU(ARM64)
    // LLVM might save and use all ARM64 callee saves specified in the ABI.
    result.set(GPRInfo::regCS0);
    result.set(GPRInfo::regCS1);
    result.set(GPRInfo::regCS2);
    result.set(GPRInfo::regCS3);
    result.set(GPRInfo::regCS4);
    result.set(GPRInfo::regCS5);
    result.set(GPRInfo::regCS6);
    result.set(GPRInfo::regCS7);
    ASSERT(GPRInfo::regCS8 == GPRInfo::tagTypeNumberRegister);
    ASSERT(GPRInfo::regCS9 == GPRInfo::tagMaskRegister);
    result.set(GPRInfo::regCS8);
    result.set(GPRInfo::regCS9);
    result.set(FPRInfo::fpRegCS0);
    result.set(FPRInfo::fpRegCS1);
    result.set(FPRInfo::fpRegCS2);
    result.set(FPRInfo::fpRegCS3);
    result.set(FPRInfo::fpRegCS4);
    result.set(FPRInfo::fpRegCS5);
    result.set(FPRInfo::fpRegCS6);
    result.set(FPRInfo::fpRegCS7);
#else
    UNREACHABLE_FOR_PLATFORM();
#endif
#endif
    return result;
}
Example #10
MacroAssemblerCodeRef generateRegisterPreservationWrapper(VM& vm, ExecutableBase* executable, MacroAssemblerCodePtr target)
{
#if ENABLE(FTL_JIT)
    // We shouldn't ever be generating wrappers for native functions.
    RegisterSet toSave = registersToPreserve();
    ptrdiff_t offset = registerPreservationOffset();
    
    AssemblyHelpers jit(&vm, 0);
    
    jit.preserveReturnAddressAfterCall(GPRInfo::regT1);
    jit.load32(
        AssemblyHelpers::Address(
            AssemblyHelpers::stackPointerRegister,
            (JSStack::ArgumentCount - JSStack::CallerFrameAndPCSize) * sizeof(Register) + PayloadOffset),
        GPRInfo::regT2);
    
    // Place the stack pointer where we want it to be.
    jit.subPtr(AssemblyHelpers::TrustedImm32(offset), AssemblyHelpers::stackPointerRegister);
    
    // Compute the number of things we will be copying.
    jit.add32(
        AssemblyHelpers::TrustedImm32(
            JSStack::CallFrameHeaderSize - JSStack::CallerFrameAndPCSize),
        GPRInfo::regT2);

    ASSERT(!toSave.get(GPRInfo::regT4));
    jit.move(AssemblyHelpers::stackPointerRegister, GPRInfo::regT4);
    
    AssemblyHelpers::Label loop = jit.label();
    jit.sub32(AssemblyHelpers::TrustedImm32(1), GPRInfo::regT2);
    jit.load64(AssemblyHelpers::Address(GPRInfo::regT4, offset), GPRInfo::regT0);
    jit.store64(GPRInfo::regT0, GPRInfo::regT4);
    jit.addPtr(AssemblyHelpers::TrustedImm32(sizeof(Register)), GPRInfo::regT4);
    jit.branchTest32(AssemblyHelpers::NonZero, GPRInfo::regT2).linkTo(loop, &jit);

    // At this point regT4 + offset points to where we save things.
    ptrdiff_t currentOffset = 0;
    jit.storePtr(GPRInfo::regT1, AssemblyHelpers::Address(GPRInfo::regT4, currentOffset));
    
    for (GPRReg gpr = AssemblyHelpers::firstRegister(); gpr <= AssemblyHelpers::lastRegister(); gpr = static_cast<GPRReg>(gpr + 1)) {
        if (!toSave.get(gpr))
            continue;
        currentOffset += sizeof(Register);
        jit.store64(gpr, AssemblyHelpers::Address(GPRInfo::regT4, currentOffset));
    }
    
    // Assume that there aren't any saved FP registers.
    
    // Restore the tag registers.
    jit.move(AssemblyHelpers::TrustedImm64(TagTypeNumber), GPRInfo::tagTypeNumberRegister);
    jit.add64(AssemblyHelpers::TrustedImm32(TagMask - TagTypeNumber), GPRInfo::tagTypeNumberRegister, GPRInfo::tagMaskRegister);
    
    jit.move(
        AssemblyHelpers::TrustedImmPtr(
            vm.getCTIStub(registerRestorationThunkGenerator).code().executableAddress()),
        GPRInfo::nonArgGPR0);
    jit.restoreReturnAddressBeforeReturn(GPRInfo::nonArgGPR0);
    AssemblyHelpers::Jump jump = jit.jump();
    
    LinkBuffer linkBuffer(vm, &jit, GLOBAL_THUNK_ID);
    linkBuffer.link(jump, CodeLocationLabel(target));

    if (Options::verboseFTLToJSThunk())
        dataLog("Need a thunk for calls from FTL to non-FTL version of ", *executable, "\n");
    
    return FINALIZE_DFG_CODE(linkBuffer, ("Register preservation wrapper for %s/%s, %p", toCString(executable->hashFor(CodeForCall)).data(), toCString(executable->hashFor(CodeForConstruct)).data(), target.executableAddress()));
#else // ENABLE(FTL_JIT)
    UNUSED_PARAM(vm);
    UNUSED_PARAM(executable);
    UNUSED_PARAM(target);
    // We don't support non-FTL builds for two reasons:
    // - It just so happens that currently only the FTL bottoms out in this code.
    // - The code above uses 64-bit instructions. It doesn't necessarily have to; it would be
    //   easy to change it so that it doesn't. But obviously making that change would be a
    //   prerequisite to removing this #if.
    UNREACHABLE_FOR_PLATFORM();
    return MacroAssemblerCodeRef();
#endif // ENABLE(FTL_JIT)
}
Example #11
static void generateRegisterRestoration(AssemblyHelpers& jit)
{
#if ENABLE(FTL_JIT)
    RegisterSet toSave = registersToPreserve();
    ptrdiff_t offset = registerPreservationOffset();
    
    ASSERT(!toSave.get(GPRInfo::regT4));

    // We need to place the stack pointer back to where the caller thought they left it.
    // But also, in order to recover the registers, we need to figure out how big the
    // arguments area is.
    
    jit.load32(
        AssemblyHelpers::Address(
            AssemblyHelpers::stackPointerRegister,
            (JSStack::ArgumentCount - JSStack::CallerFrameAndPCSize) * sizeof(Register) + PayloadOffset),
        GPRInfo::regT4);
    
    jit.move(GPRInfo::regT4, GPRInfo::regT2);
    jit.lshift32(AssemblyHelpers::TrustedImm32(3), GPRInfo::regT2);
    
    jit.addPtr(AssemblyHelpers::TrustedImm32(offset), AssemblyHelpers::stackPointerRegister);
    jit.addPtr(AssemblyHelpers::stackPointerRegister, GPRInfo::regT2);
    
    // We saved things at:
    //
    //     adjSP + (JSStack::CallFrameHeaderSize - JSStack::CallerFrameAndPCSize + NumArgs) * 8
    //
    // Where:
    //
    //     adjSP = origSP - offset
    //
    // regT2 now points at:
    //
    //     origSP + NumArgs * 8
    //   = adjSP + offset + NumArgs * 8
    // 
    // So if we subtract offset and then add JSStack::CallFrameHeaderSize and subtract
    // JSStack::CallerFrameAndPCSize, we'll get the thing we want.
    ptrdiff_t currentOffset = -offset + sizeof(Register) * (
        JSStack::CallFrameHeaderSize - JSStack::CallerFrameAndPCSize);
    jit.loadPtr(AssemblyHelpers::Address(GPRInfo::regT2, currentOffset), GPRInfo::regT1);
    
    for (GPRReg gpr = AssemblyHelpers::firstRegister(); gpr <= AssemblyHelpers::lastRegister(); gpr = static_cast<GPRReg>(gpr + 1)) {
        if (!toSave.get(gpr))
            continue;
        currentOffset += sizeof(Register);
        jit.load64(AssemblyHelpers::Address(GPRInfo::regT2, currentOffset), gpr);
    }
    
    // Thunks like this rely on the ArgumentCount being intact. Pay it forward.
    jit.store32(
        GPRInfo::regT4,
        AssemblyHelpers::Address(
            AssemblyHelpers::stackPointerRegister,
            (JSStack::ArgumentCount - JSStack::CallerFrameAndPCSize) * sizeof(Register) + PayloadOffset));
    
    if (!ASSERT_DISABLED) {
        AssemblyHelpers::Jump ok = jit.branchPtr(
            AssemblyHelpers::Above, GPRInfo::regT1, AssemblyHelpers::TrustedImmPtr(static_cast<size_t>(0x1000)));
        jit.breakpoint();
        ok.link(&jit);
    }
    
    jit.jump(GPRInfo::regT1);
#else // ENABLE(FTL_JIT)
    UNUSED_PARAM(jit);
    UNREACHABLE_FOR_PLATFORM();
#endif // ENABLE(FTL_JIT)
}
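The offset derivation in the comment above can be sanity-checked with ordinary integer arithmetic. The snippet below does exactly that; the numeric constants are made-up placeholders (the real values come from JSStack.h and registerPreservationOffset()), so only the algebra is meaningful:

#include <cassert>
#include <cstdint>

int main()
{
    const intptr_t registerSize = 8;      // sizeof(Register)
    const intptr_t headerSize = 5;        // placeholder for JSStack::CallFrameHeaderSize
    const intptr_t callerFrameAndPC = 2;  // placeholder for JSStack::CallerFrameAndPCSize
    const intptr_t offset = 64;           // placeholder for registerPreservationOffset()
    const intptr_t numArgs = 3;
    const intptr_t origSP = 0x10000;
    const intptr_t adjSP = origSP - offset;

    // Where the comment says the wrapper saved things:
    const intptr_t savedAt = adjSP + (headerSize - callerFrameAndPC + numArgs) * registerSize;
    // What regT2 holds after the two addPtr operations above:
    const intptr_t regT2 = origSP + numArgs * registerSize;
    // The displacement computed by the code:
    const intptr_t currentOffset = -offset + registerSize * (headerSize - callerFrameAndPC);

    assert(regT2 + currentOffset == savedAt); // the comment's derivation checks out
    return 0;
}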
Example #12
bool JSDataView::setIndex(ExecState*, unsigned, JSValue)
{
    UNREACHABLE_FOR_PLATFORM();
    return false;
}
Example #13
bool JSDataView::set(ExecState*, JSObject*, unsigned, unsigned)
{
    UNREACHABLE_FOR_PLATFORM();
    return false;
}
Example #14
JSDataView* JSDataView::create(ExecState*, Structure*, unsigned)
{
    UNREACHABLE_FOR_PLATFORM();
    return 0;
}
Example #15
ArrayBuffer* JSDataView::slowDownAndWasteMemory(JSArrayBufferView*)
{
    UNREACHABLE_FOR_PLATFORM();
    return 0;
}
Example #16
JITWorklist::~JITWorklist()
{
    UNREACHABLE_FOR_PLATFORM();
}
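A destructor whose whole body is UNREACHABLE_FOR_PLATFORM() usually marks an object that is meant to live for the entire process, so actually destroying it would indicate a logic error. A hedged sketch of that idea with stand-in names (Worklist and MY_UNREACHABLE are hypothetical):

#include <cassert>
#include <cstdlib>

#define MY_UNREACHABLE() do { assert(!"should never run"); std::abort(); } while (0)

class Worklist {
public:
    static Worklist& singleton()
    {
        static Worklist* instance = new Worklist; // intentionally never deleted
        return *instance;
    }
    ~Worklist() { MY_UNREACHABLE(); } // reaching this destructor is a bug

private:
    Worklist() = default;
};

int main()
{
    Worklist::singleton(); // safe: the instance is leaked, never destroyed
    return 0;
}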