Example #1
void getFunctionEntrypoint(VM& vm, CodeSpecializationKind kind, JITCode& jitCode, MacroAssemblerCodePtr& arityCheck)
{
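    // Without the JIT, the entry point and arity check come from the LLInt interpreter prologues.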
    if (!vm.canUseJIT()) {
        if (kind == CodeForCall) {
            jitCode = JITCode(MacroAssemblerCodeRef::createLLIntCodeRef(llint_function_for_call_prologue), JITCode::InterpreterThunk);
            arityCheck = MacroAssemblerCodePtr::createLLIntCodePtr(llint_function_for_call_arity_check);
            return;
        }

        ASSERT(kind == CodeForConstruct);
        jitCode = JITCode(MacroAssemblerCodeRef::createLLIntCodeRef(llint_function_for_construct_prologue), JITCode::InterpreterThunk);
        arityCheck = MacroAssemblerCodePtr::createLLIntCodePtr(llint_function_for_construct_arity_check);
        return;
    }
    
#if ENABLE(JIT)
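    // With the JIT available, use the CTI entry and arity-check thunks cached on the VM.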
    if (kind == CodeForCall) {
        jitCode = JITCode(vm.getCTIStub(functionForCallEntryThunkGenerator), JITCode::InterpreterThunk);
        arityCheck = vm.getCTIStub(functionForCallArityCheckThunkGenerator).code();
        return;
    }

    ASSERT(kind == CodeForConstruct);
    jitCode = JITCode(vm.getCTIStub(functionForConstructEntryThunkGenerator), JITCode::InterpreterThunk);
    arityCheck = vm.getCTIStub(functionForConstructArityCheckThunkGenerator).code();
#endif // ENABLE(JIT)
}
Example #2
static void setFunctionEntrypoint(VM& vm, CodeBlock* codeBlock)
{
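    // Install either the LLInt thunks or the JIT CTI thunks directly on the CodeBlock, depending on whether the JIT is available.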
    CodeSpecializationKind kind = codeBlock->specializationKind();
    
    if (!vm.canUseJIT()) {
        if (kind == CodeForCall) {
            codeBlock->setJITCode(
                adoptRef(new DirectJITCode(MacroAssemblerCodeRef::createLLIntCodeRef(llint_function_for_call_prologue), JITCode::InterpreterThunk)),
                MacroAssemblerCodePtr::createLLIntCodePtr(llint_function_for_call_arity_check));
            return;
        }

        ASSERT(kind == CodeForConstruct);
        codeBlock->setJITCode(
            adoptRef(new DirectJITCode(MacroAssemblerCodeRef::createLLIntCodeRef(llint_function_for_construct_prologue), JITCode::InterpreterThunk)),
            MacroAssemblerCodePtr::createLLIntCodePtr(llint_function_for_construct_arity_check));
        return;
    }
    
#if ENABLE(JIT)
    if (kind == CodeForCall) {
        codeBlock->setJITCode(
            adoptRef(new DirectJITCode(vm.getCTIStub(functionForCallEntryThunkGenerator), JITCode::InterpreterThunk)),
            vm.getCTIStub(functionForCallArityCheckThunkGenerator).code());
        return;
    }

    ASSERT(kind == CodeForConstruct);
    codeBlock->setJITCode(
        adoptRef(new DirectJITCode(vm.getCTIStub(functionForConstructEntryThunkGenerator), JITCode::InterpreterThunk)),
        vm.getCTIStub(functionForConstructArityCheckThunkGenerator).code());
#endif // ENABLE(JIT)
}
Example #3
static void* handleHostCall(ExecState* execCallee, JSValue callee, CodeSpecializationKind kind)
{
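    // The callee is not a JS function: run the host (native) call or construct, or throw a not-a-function / not-a-constructor error.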
    ExecState* exec = execCallee->callerFrame();
    VM* vm = &exec->vm();

    execCallee->setScope(exec->scope());
    execCallee->setCodeBlock(0);

    if (kind == CodeForCall) {
        CallData callData;
        CallType callType = getCallData(callee, callData);
    
        ASSERT(callType != CallTypeJS);
    
        if (callType == CallTypeHost) {
            NativeCallFrameTracer tracer(vm, execCallee);
            execCallee->setCallee(asObject(callee));
            vm->hostCallReturnValue = JSValue::decode(callData.native.function(execCallee));
            if (vm->exception())
                return vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress();

            return reinterpret_cast<void*>(getHostCallReturnValue);
        }
    
        ASSERT(callType == CallTypeNone);
        exec->vm().throwException(exec, createNotAFunctionError(exec, callee));
        return vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress();
    }

    ASSERT(kind == CodeForConstruct);
    
    ConstructData constructData;
    ConstructType constructType = getConstructData(callee, constructData);
    
    ASSERT(constructType != ConstructTypeJS);
    
    if (constructType == ConstructTypeHost) {
        NativeCallFrameTracer tracer(vm, execCallee);
        execCallee->setCallee(asObject(callee));
        vm->hostCallReturnValue = JSValue::decode(constructData.native.function(execCallee));
        if (vm->exception())
            return vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress();

        return reinterpret_cast<void*>(getHostCallReturnValue);
    }
    
    ASSERT(constructType == ConstructTypeNone);
    exec->vm().throwException(exec, createNotAConstructorError(exec, callee));
    return vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress();
}
Example #4
void JSCallBase::link(VM& vm, LinkBuffer& linkBuffer)
{
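    // Bind the slow-path call to the shared link-call thunk and record where the call site can be patched later.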
    linkBuffer.link(
        m_slowCall, FunctionPtr(vm.getCTIStub(linkCallThunkGenerator).code().executableAddress()));

    m_callLinkInfo->setCallLocations(linkBuffer.locationOfNearCall(m_slowCall),
        linkBuffer.locationOf(m_targetToCheck), linkBuffer.locationOfNearCall(m_fastCall));
}
Example #5
static CompilationResult compileImpl(
    VM& vm, CodeBlock* codeBlock, CodeBlock* profiledDFGCodeBlock, CompilationMode mode,
    unsigned osrEntryBytecodeIndex, const Operands<JSValue>& mustHandleValues,
    PassRefPtr<DeferredCompilationCallback> callback)
{
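    // DFG driver: filter out blocks we should not compile, warm up shared stubs, then compile synchronously or enqueue on the concurrent worklist.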
    SamplingRegion samplingRegion("DFG Compilation (Driver)");
    
    if (!Options::bytecodeRangeToDFGCompile().isInRange(codeBlock->instructionCount())
        || !FunctionWhitelist::ensureGlobalWhitelist().contains(codeBlock))
        return CompilationFailed;
    
    numCompilations++;
    
    ASSERT(codeBlock);
    ASSERT(codeBlock->alternative());
    ASSERT(codeBlock->alternative()->jitType() == JITCode::BaselineJIT);
    ASSERT(!profiledDFGCodeBlock || profiledDFGCodeBlock->jitType() == JITCode::DFGJIT);
    
    if (logCompilationChanges(mode))
        dataLog("DFG(Driver) compiling ", *codeBlock, " with ", mode, ", number of instructions = ", codeBlock->instructionCount(), "\n");
    
    // Make sure that any stubs that the DFG is going to use are initialized. We want to
    // make sure that all JIT code generation does finalization on the main thread.
    vm.getCTIStub(osrExitGenerationThunkGenerator);
    vm.getCTIStub(throwExceptionFromCallSlowPathGenerator);
    vm.getCTIStub(linkCallThunkGenerator);
    vm.getCTIStub(linkPolymorphicCallThunkGenerator);
    
    if (vm.typeProfiler())
        vm.typeProfilerLog()->processLogEntries(ASCIILiteral("Preparing for DFG compilation."));
    
    RefPtr<Plan> plan = adoptRef(
        new Plan(codeBlock, profiledDFGCodeBlock, mode, osrEntryBytecodeIndex, mustHandleValues));
    
    plan->callback = callback;
    if (Options::enableConcurrentJIT()) {
        Worklist* worklist = ensureGlobalWorklistFor(mode);
        if (logCompilationChanges(mode))
            dataLog("Deferring DFG compilation of ", *codeBlock, " with queue length ", worklist->queueLength(), ".\n");
        worklist->enqueue(plan);
        return CompilationDeferred;
    }
    
    plan->compileInThread(*vm.dfgState, 0);
    return plan->finalizeWithoutNotifyingCallback();
}
Example #6
void getProgramEntrypoint(VM& vm, JITCode& jitCode)
{
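    // Program code needs no arity check, so only the entry thunk is selected: the LLInt prologue without the JIT, the CTI program entry thunk with it.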
    if (!vm.canUseJIT()) {
        jitCode = JITCode(MacroAssemblerCodeRef::createLLIntCodeRef(llint_program_prologue), JITCode::InterpreterThunk);
        return;
    }
#if ENABLE(JIT)
    jitCode = JITCode(vm.getCTIStub(programEntryThunkGenerator), JITCode::InterpreterThunk);
#endif
}
Example #7
void JSCallBase::link(VM& vm, LinkBuffer& linkBuffer)
{
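    // FTL call linking: pick a register-preserving link thunk for this call's specialization kind, then record the call locations on the CallLinkInfo.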
    ThunkGenerator generator = linkThunkGeneratorFor(
        CallLinkInfo::specializationKindFor(m_type), MustPreserveRegisters);
    
    linkBuffer.link(
        m_slowCall, FunctionPtr(vm.getCTIStub(generator).code().executableAddress()));

    m_callLinkInfo->setUpCallFromFTL(m_type, m_origin, linkBuffer.locationOfNearCall(m_slowCall),
        linkBuffer.locationOf(m_targetToCheck), linkBuffer.locationOfNearCall(m_fastCall),
        GPRInfo::regT0);
}
Example #8
static void setProgramEntrypoint(VM& vm, CodeBlock* codeBlock)
{
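    // As with getProgramEntrypoint, there is no arity check, so an empty MacroAssemblerCodePtr is installed alongside the entry thunk.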
    if (!vm.canUseJIT()) {
        codeBlock->setJITCode(
            adoptRef(new DirectJITCode(MacroAssemblerCodeRef::createLLIntCodeRef(llint_program_prologue), JITCode::InterpreterThunk)),
            MacroAssemblerCodePtr());
        return;
    }
#if ENABLE(JIT)
    codeBlock->setJITCode(
        adoptRef(new DirectJITCode(vm.getCTIStub(programEntryThunkGenerator), JITCode::InterpreterThunk)),
        MacroAssemblerCodePtr());
#endif
}
Example #9
void JSCallBase::link(VM& vm, LinkBuffer& linkBuffer)
{
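    // Variant of the FTL call linking in Example #7 that fills in the CallLinkInfo fields directly instead of calling setUpCallFromFTL.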
    ThunkGenerator generator = linkThunkGeneratorFor(
        CallLinkInfo::specializationKindFor(m_type), MustPreserveRegisters);
    
    linkBuffer.link(
        m_slowCall, FunctionPtr(vm.getCTIStub(generator).code().executableAddress()));
    
    m_callLinkInfo->isFTL = true;
    m_callLinkInfo->callType = m_type;
    m_callLinkInfo->codeOrigin = m_origin;
    m_callLinkInfo->callReturnLocation = linkBuffer.locationOfNearCall(m_slowCall);
    m_callLinkInfo->hotPathBegin = linkBuffer.locationOf(m_targetToCheck);
    m_callLinkInfo->hotPathOther = linkBuffer.locationOfNearCall(m_fastCall);
    m_callLinkInfo->calleeGPR = GPRInfo::regT0;
}
Example #10
inline char* linkFor(ExecState* execCallee, CodeSpecializationKind kind)
{
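    // Slow path for an unlinked call: resolve the callee, prepare its code for the given specialization kind, and link the call site so later calls go straight to that code.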
    ExecState* exec = execCallee->callerFrame();
    VM* vm = &exec->vm();
    NativeCallFrameTracer tracer(vm, exec);
    
    JSValue calleeAsValue = execCallee->calleeAsValue();
    JSCell* calleeAsFunctionCell = getJSFunction(calleeAsValue);
    if (!calleeAsFunctionCell)
        return reinterpret_cast<char*>(handleHostCall(execCallee, calleeAsValue, kind));

    JSFunction* callee = jsCast<JSFunction*>(calleeAsFunctionCell);
    execCallee->setScope(callee->scopeUnchecked());
    ExecutableBase* executable = callee->executable();

    MacroAssemblerCodePtr codePtr;
    CodeBlock* codeBlock = 0;
    if (executable->isHostFunction())
        codePtr = executable->generatedJITCodeFor(kind)->addressForCall();
    else {
        FunctionExecutable* functionExecutable = static_cast<FunctionExecutable*>(executable);
        JSObject* error = functionExecutable->prepareForExecution(execCallee, callee->scope(), kind);
        if (error) {
            vm->throwException(exec, createStackOverflowError(exec));
            return reinterpret_cast<char*>(vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress());
        }
        codeBlock = functionExecutable->codeBlockFor(kind);
        if (execCallee->argumentCountIncludingThis() < static_cast<size_t>(codeBlock->numParameters()))
            codePtr = functionExecutable->generatedJITCodeWithArityCheckFor(kind);
        else
            codePtr = functionExecutable->generatedJITCodeFor(kind)->addressForCall();
    }
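    // Link the call site only after it has been seen once; the first time through just marks it as seen.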
    CallLinkInfo& callLinkInfo = exec->codeBlock()->getCallLinkInfo(execCallee->returnPC());
    if (!callLinkInfo.seenOnce())
        callLinkInfo.setSeen();
    else
        linkFor(execCallee, callLinkInfo, codeBlock, callee, codePtr, kind);
    return reinterpret_cast<char*>(codePtr.executableAddress());
}
Example #11
inline char* virtualForWithFunction(ExecState* execCallee, CodeSpecializationKind kind, JSCell*& calleeAsFunctionCell)
{
    ExecState* exec = execCallee->callerFrame();
    VM* vm = &exec->vm();
    NativeCallFrameTracer tracer(vm, exec);
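    // Virtual call slow path: the call site stays unlinked, so the callee is resolved on every call and compiled on demand if it has no JIT code for this kind.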

    JSValue calleeAsValue = execCallee->calleeAsValue();
    calleeAsFunctionCell = getJSFunction(calleeAsValue);
    if (UNLIKELY(!calleeAsFunctionCell))
        return reinterpret_cast<char*>(handleHostCall(execCallee, calleeAsValue, kind));
    
    JSFunction* function = jsCast<JSFunction*>(calleeAsFunctionCell);
    execCallee->setScope(function->scopeUnchecked());
    ExecutableBase* executable = function->executable();
    if (UNLIKELY(!executable->hasJITCodeFor(kind))) {
        FunctionExecutable* functionExecutable = static_cast<FunctionExecutable*>(executable);
        JSObject* error = functionExecutable->prepareForExecution(execCallee, function->scope(), kind);
        if (error) {
            exec->vm().throwException(execCallee, error);
            return reinterpret_cast<char*>(vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress());
        }
    }
    return reinterpret_cast<char*>(executable->generatedJITCodeWithArityCheckFor(kind).executableAddress());
}
Example #12
MacroAssemblerCodeRef generateRegisterPreservationWrapper(VM& vm, ExecutableBase* executable, MacroAssemblerCodePtr target)
{
#if ENABLE(FTL_JIT)
    // We shouldn't ever be generating wrappers for native functions.
    RegisterSet toSave = registersToPreserve();
    ptrdiff_t offset = registerPreservationOffset();
    
    AssemblyHelpers jit(&vm, 0);
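    // Shift the frame down by the preservation area, copy the header and arguments, save the preserved registers above them, and arrange to return through the register-restoration thunk before jumping to the real target.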
    
    jit.preserveReturnAddressAfterCall(GPRInfo::regT1);
    jit.load32(
        AssemblyHelpers::Address(
            AssemblyHelpers::stackPointerRegister,
            (JSStack::ArgumentCount - JSStack::CallerFrameAndPCSize) * sizeof(Register) + PayloadOffset),
        GPRInfo::regT2);
    
    // Place the stack pointer where we want it to be.
    jit.subPtr(AssemblyHelpers::TrustedImm32(offset), AssemblyHelpers::stackPointerRegister);
    
    // Compute the number of things we will be copying.
    jit.add32(
        AssemblyHelpers::TrustedImm32(
            JSStack::CallFrameHeaderSize - JSStack::CallerFrameAndPCSize),
        GPRInfo::regT2);

    ASSERT(!toSave.get(GPRInfo::regT4));
    jit.move(AssemblyHelpers::stackPointerRegister, GPRInfo::regT4);
    
    AssemblyHelpers::Label loop = jit.label();
    jit.sub32(AssemblyHelpers::TrustedImm32(1), GPRInfo::regT2);
    jit.load64(AssemblyHelpers::Address(GPRInfo::regT4, offset), GPRInfo::regT0);
    jit.store64(GPRInfo::regT0, GPRInfo::regT4);
    jit.addPtr(AssemblyHelpers::TrustedImm32(sizeof(Register)), GPRInfo::regT4);
    jit.branchTest32(AssemblyHelpers::NonZero, GPRInfo::regT2).linkTo(loop, &jit);

    // At this point regT4 + offset points to where we save things.
    ptrdiff_t currentOffset = 0;
    jit.storePtr(GPRInfo::regT1, AssemblyHelpers::Address(GPRInfo::regT4, currentOffset));
    
    for (GPRReg gpr = AssemblyHelpers::firstRegister(); gpr <= AssemblyHelpers::lastRegister(); gpr = static_cast<GPRReg>(gpr + 1)) {
        if (!toSave.get(gpr))
            continue;
        currentOffset += sizeof(Register);
        jit.store64(gpr, AssemblyHelpers::Address(GPRInfo::regT4, currentOffset));
    }
    
    // Assume that there aren't any saved FP registers.
    
    // Restore the tag registers.
    jit.move(AssemblyHelpers::TrustedImm64(TagTypeNumber), GPRInfo::tagTypeNumberRegister);
    jit.add64(AssemblyHelpers::TrustedImm32(TagMask - TagTypeNumber), GPRInfo::tagTypeNumberRegister, GPRInfo::tagMaskRegister);
    
    jit.move(
        AssemblyHelpers::TrustedImmPtr(
            vm.getCTIStub(registerRestorationThunkGenerator).code().executableAddress()),
        GPRInfo::nonArgGPR0);
    jit.restoreReturnAddressBeforeReturn(GPRInfo::nonArgGPR0);
    AssemblyHelpers::Jump jump = jit.jump();
    
    LinkBuffer linkBuffer(vm, &jit, GLOBAL_THUNK_ID);
    linkBuffer.link(jump, CodeLocationLabel(target));

    if (Options::verboseFTLToJSThunk())
        dataLog("Need a thunk for calls from FTL to non-FTL version of ", *executable, "\n");
    
    return FINALIZE_DFG_CODE(linkBuffer, ("Register preservation wrapper for %s/%s, %p", toCString(executable->hashFor(CodeForCall)).data(), toCString(executable->hashFor(CodeForConstruct)).data(), target.executableAddress()));
#else // ENABLE(FTL_JIT)
    UNUSED_PARAM(vm);
    UNUSED_PARAM(executable);
    UNUSED_PARAM(target);
    // We don't support non-FTL builds for two reasons:
    // - It just so happens that currently only the FTL bottoms out in this code.
    // - The code above uses 64-bit instructions. It doesn't necessarily have to; it would be
    //   easy to change it so that it doesn't. But obviously making that change would be a
    //   prerequisite to removing this #if.
    UNREACHABLE_FOR_PLATFORM();
    return MacroAssemblerCodeRef();
#endif // ENABLE(FTL_JIT)
}