Example #1
static bool
DecodeExport(JSContext* cx, Decoder& d, ModuleGenerator& mg, ExportMap* exportMap)
{
    if (!d.readCStringIf(FuncSubsection))
        return Fail(cx, d, "expected 'func' tag");

    uint32_t funcIndex;
    if (!d.readVarU32(&funcIndex))
        return Fail(cx, d, "expected export internal index");

    if (funcIndex >= mg.numFuncSigs())
        return Fail(cx, d, "export function index out of range");

    uint32_t exportIndex;
    if (!mg.declareExport(funcIndex, &exportIndex))
        return false;

    MOZ_ASSERT(exportIndex <= exportMap->exportNames.length());
    if (exportIndex == exportMap->exportNames.length()) {
        UniqueChars funcName(JS_smprintf("%u", unsigned(funcIndex)));
        if (!funcName || !exportMap->exportNames.emplaceBack(Move(funcName)))
            return false;
    }

    if (!exportMap->fieldsToExports.append(exportIndex))
        return false;

    const char* chars;
    if (!d.readCString(&chars))
        return Fail(cx, d, "expected export external name string");

    return exportMap->fieldNames.emplaceBack(DuplicateString(chars));
}
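
The decoder loop that drives DecodeExport is not shown above. The following hypothetical driver, modeled on the per-function loop in DecodeCodeSection (Example #13), sketches how such entries are typically consumed; the helper name DecodeExports, the leading count, and the error text are assumptions, not part of the original source.

static bool
DecodeExports(JSContext* cx, Decoder& d, ModuleGenerator& mg, ExportMap* exportMap)
{
    // Hypothetical: read the number of entries, then decode each one.
    uint32_t numExports;
    if (!d.readVarU32(&numExports))
        return Fail(cx, d, "expected number of exports");

    for (uint32_t i = 0; i < numExports; i++) {
        if (!DecodeExport(cx, d, mg, exportMap))
            return false;
    }

    return true;
}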
Example #2
// If an exception is thrown, simply pop all frames (since asm.js does not
// contain try/catch). To do this:
//  1. Restore 'sp' to its value right after the PushRegsInMask in GenerateEntry.
//  2. PopRegsInMask to restore the caller's non-volatile registers.
//  3. Return (to CallAsmJS).
static bool
GenerateThrowStub(ModuleGenerator& mg, Label* throwLabel)
{
    MacroAssembler& masm = mg.masm();

    masm.haltingAlign(CodeAlignment);
    Offsets offsets;
    offsets.begin = masm.currentOffset();
    masm.bind(throwLabel);

    // We are about to pop all frames in this WasmActivation. Set fp to null to
    // maintain the invariant that fp is either null or pointing to a valid
    // frame.
    Register scratch = ABIArgGenerator::NonArgReturnReg0;
    masm.loadWasmActivation(scratch);
    masm.storePtr(ImmWord(0), Address(scratch, WasmActivation::offsetOfFP()));

    masm.setFramePushed(FramePushedForEntrySP);
    masm.loadStackPtr(Address(scratch, WasmActivation::offsetOfEntrySP()));
    masm.Pop(scratch);
    masm.PopRegsInMask(NonVolatileRegs);
    MOZ_ASSERT(masm.framePushed() == 0);

    masm.mov(ImmWord(0), ReturnReg);
    masm.ret();

    if (masm.oom())
        return false;

    offsets.end = masm.currentOffset();
    return mg.defineInlineStub(offsets);
}
Example #3
static bool
DecodeCodeSection(const ModuleEnvironment& env, DecoderT& d, ModuleGenerator& mg)
{
    if (!env.codeSection) {
        if (env.numFuncDefs() != 0)
            return d.fail("expected code section");

        return mg.finishFuncDefs();
    }

    uint32_t numFuncDefs;
    if (!d.readVarU32(&numFuncDefs))
        return d.fail("expected function body count");

    if (numFuncDefs != env.numFuncDefs())
        return d.fail("function body count does not match function signature count");

    for (uint32_t funcDefIndex = 0; funcDefIndex < numFuncDefs; funcDefIndex++) {
        if (!DecodeFunctionBody(d, mg, env.numFuncImports() + funcDefIndex))
            return false;
    }

    if (!d.finishSection(*env.codeSection, "code"))
        return false;

    return mg.finishFuncDefs();
}
Example #4
// Generate a stub that is jumped to from an out-of-bounds heap access when
// there are throwing semantics. This stub calls a C++ function to report an
// error and then jumps to the throw stub to pop the activation.
static bool
GenerateOutOfBoundsStub(ModuleGenerator& mg, Label* throwLabel)
{
    MacroAssembler& masm = mg.masm();

    masm.haltingAlign(CodeAlignment);
    Offsets offsets;
    offsets.begin = masm.currentOffset();
    masm.bind(masm.asmOnOutOfBoundsLabel());

    // sp can be anything at this point, so ensure it is aligned when calling
    // into C++. Since we unconditionally jump to the throw stub, there is no
    // need to restore sp afterwards.
    masm.andToStackPtr(Imm32(~(ABIStackAlignment - 1)));

    // OnOutOfBounds always throws.
    masm.assertStackAlignment(ABIStackAlignment);
    masm.call(SymbolicAddress::OnOutOfBounds);
    masm.jump(throwLabel);

    if (masm.oom())
        return false;

    offsets.end = masm.currentOffset();
    return mg.defineOutOfBoundsStub(offsets);
}
Example #5
static bool
DecodeFunctionBodies(JSContext* cx, Decoder& d, ModuleGenerator& mg)
{
    if (!mg.startFuncDefs())
        return false;

    uint32_t sectionStart;
    if (!d.startSection(FunctionBodiesId, &sectionStart))
        return Fail(cx, d, "failed to start section");

    if (sectionStart == Decoder::NotStarted) {
        if (mg.numFuncSigs() != 0)
            return Fail(cx, d, "expected function bodies");

        return mg.finishFuncDefs();
    }

    uint32_t numFuncBodies;
    if (!d.readVarU32(&numFuncBodies))
        return Fail(cx, d, "expected function body count");

    if (numFuncBodies != mg.numFuncSigs())
        return Fail(cx, d, "function body count does not match function signature count");

    for (uint32_t funcIndex = 0; funcIndex < numFuncBodies; funcIndex++) {
        if (!DecodeFunctionBody(cx, d, mg, funcIndex))
            return false;
    }

    if (!d.finishSection(sectionStart))
        return Fail(cx, d, "function section byte size mismatch");

    return mg.finishFuncDefs();
}
Example #6
// Generate a stub that is called immediately after the prologue when there is a
// stack overflow. This stub calls a C++ function to report the error and then
// jumps to the throw stub to pop the activation.
static bool
GenerateStackOverflowStub(ModuleGenerator& mg, Label* throwLabel)
{
    MacroAssembler& masm = mg.masm();

    masm.haltingAlign(CodeAlignment);
    Offsets offsets;
    offsets.begin = masm.currentOffset();
    masm.bind(masm.asmStackOverflowLabel());

    // If we reach here via the non-profiling prologue, WasmActivation::fp has
    // not been updated. To enable stack unwinding from C++, store to it now. If
    // we reached here via the profiling prologue, we'll just store the same
    // value again. Do not update AsmJSFrame::callerFP as it is not necessary in
    // the non-profiling case (there is no return path from this point) and, in
    // the profiling case, it is already correct.
    Register activation = ABIArgGenerator::NonArgReturnReg0;
    masm.loadWasmActivation(activation);
    masm.storePtr(masm.getStackPointer(), Address(activation, WasmActivation::offsetOfFP()));

    // Prepare the stack for calling C++.
    if (uint32_t d = StackDecrementForCall(ABIStackAlignment, sizeof(AsmJSFrame), ShadowStackSpace))
        masm.subFromStackPtr(Imm32(d));
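
    // (Sketch of the intended arithmetic, an assumption about the contract of
    // StackDecrementForCall rather than its actual implementation: it returns
    // the smallest d such that subtracting d reserves ShadowStackSpace and
    // leaves sp ABIStackAlignment-aligned given that sizeof(AsmJSFrame) bytes
    // are already pushed, i.e. roughly
    // AlignBytes(sizeof(AsmJSFrame) + ShadowStackSpace, ABIStackAlignment)
    //   - sizeof(AsmJSFrame).)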

    // No need to restore the stack; the throw stub pops everything.
    masm.assertStackAlignment(ABIStackAlignment);
    masm.call(SymbolicAddress::ReportOverRecursed);
    masm.jump(throwLabel);

    if (masm.oom())
        return false;

    offsets.end = masm.currentOffset();
    return mg.defineInlineStub(offsets);
}
Example #7
static bool
DecodeFunctionBody(JSContext* cx, Decoder& d, ModuleGenerator& mg, uint32_t funcIndex)
{
    int64_t before = PRMJ_Now();

    uint32_t bodySize;
    if (!d.readVarU32(&bodySize))
        return Fail(cx, d, "expected number of function body bytes");

    if (d.bytesRemain() < bodySize)
        return Fail(cx, d, "function body length too big");

    const uint8_t* bodyBegin = d.currentPosition();
    const uint8_t* bodyEnd = bodyBegin + bodySize;

    FunctionGenerator fg;
    if (!mg.startFuncDef(d.currentOffset(), &fg))
        return false;

    ValTypeVector locals;
    if (!locals.appendAll(mg.funcSig(funcIndex).args()))
        return false;

    if (!DecodeLocalEntries(d, &locals))
        return Fail(cx, d, "failed decoding local entries");

    for (ValType type : locals) {
        if (!CheckValType(cx, d, type))
            return false;
    }

    FunctionDecoder f(cx, d, mg, fg, funcIndex, locals);

    ExprType type = ExprType::Void;

    while (d.currentPosition() < bodyEnd) {
        if (!DecodeExpr(f, &type))
            return false;
    }

    if (!CheckType(f, type, f.sig().ret()))
        return false;

    if (d.currentPosition() != bodyEnd)
        return Fail(cx, d, "function body length mismatch");

    if (!fg.bytes().resize(bodySize))
        return false;

    memcpy(fg.bytes().begin(), bodyBegin, bodySize);

    int64_t after = PRMJ_Now();
    unsigned generateTime = (after - before) / PRMJ_USEC_PER_MSEC;

    return mg.finishFuncDef(funcIndex, generateTime, &fg);
}
Example #8
static bool
DecodeMemoryExport(JSContext* cx, Decoder& d, ModuleGenerator& mg, CStringSet* dupSet)
{
    if (!mg.usesHeap())
        return Fail(cx, d, "cannot export memory with no memory section");

    UniqueChars fieldName = DecodeFieldName(cx, d, dupSet);
    if (!fieldName)
        return false;

    return mg.addMemoryExport(Move(fieldName));
}
Example #9
static bool
DecodeMemory(JSContext* cx, Decoder& d, ModuleGenerator& mg, MutableHandle<ArrayBufferObject*> heap)
{
    uint32_t sectionStart;
    if (!d.startSection(MemoryId, &sectionStart))
        return Fail(cx, d, "failed to start section");
    if (sectionStart == Decoder::NotStarted)
        return true;

    uint32_t initialSizePages;
    if (!d.readVarU32(&initialSizePages))
        return Fail(cx, d, "expected initial memory size");

    CheckedInt<int32_t> initialSize = initialSizePages;
    initialSize *= PageSize;
    if (!initialSize.isValid())
        return Fail(cx, d, "initial memory size too big");

    uint32_t maxSizePages;
    if (!d.readVarU32(&maxSizePages))
        return Fail(cx, d, "expected initial memory size");

    CheckedInt<int32_t> maxSize = maxSizePages;
    maxSize *= PageSize;
    if (!maxSize.isValid())
        return Fail(cx, d, "initial memory size too big");

    uint8_t exported;
    if (!d.readFixedU8(&exported))
        return Fail(cx, d, "expected exported byte");

    if (exported) {
        UniqueChars fieldName = DuplicateString("memory");
        if (!fieldName || !mg.addMemoryExport(Move(fieldName)))
            return false;
    }

    if (!d.finishSection(sectionStart))
        return Fail(cx, d, "memory section byte size mismatch");

    bool signalsForOOB = CompileArgs(cx).useSignalHandlersForOOB;
    heap.set(ArrayBufferObject::createForWasm(cx, initialSize.value(), signalsForOOB));
    if (!heap)
        return false;

    mg.initHeapUsage(HeapUsage::Unshared);
    return true;
}
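
A minimal standalone sketch of the CheckedInt guard used twice above, assuming mozilla::CheckedInt's documented behavior (an operation that overflows the underlying type marks the value invalid instead of wrapping); PagesToBytes is a hypothetical helper name, not part of the original source.

#include <stdint.h>

#include "mozilla/CheckedInt.h"

static bool
PagesToBytes(uint32_t numPages, int32_t* bytes)
{
    // One wasm page is PageSize (64 KiB) bytes; the product must fit in int32_t.
    mozilla::CheckedInt<int32_t> size = numPages;
    size *= 65536;                 // PageSize, written out to keep this standalone
    if (!size.isValid())           // construction or multiplication overflowed
        return false;
    *bytes = size.value();
    return true;
}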
Example #10
static bool
DecodeFunctionExport(JSContext* cx, Decoder& d, ModuleGenerator& mg, CStringSet* dupSet)
{
    uint32_t funcIndex;
    if (!d.readVarU32(&funcIndex))
        return Fail(cx, d, "expected export internal index");

    if (funcIndex >= mg.numFuncSigs())
        return Fail(cx, d, "export function index out of range");

    UniqueChars fieldName = DecodeFieldName(cx, d, dupSet);
    if (!fieldName)
        return false;

    return mg.declareExport(Move(fieldName), funcIndex);
}
Example #11
static bool
DecodeFunc(JSContext* cx, Decoder& d, ModuleGenerator& mg, uint32_t funcIndex)
{
    int64_t before = PRMJ_Now();

    FunctionGenerator fg;
    if (!mg.startFuncDef(d.currentOffset(), &fg))
        return false;

    if (!d.readCStringIf(FuncSubsection))
        return Fail(cx, d, "expected 'func' tag");

    uint32_t sectionStart;
    if (!d.startSection(&sectionStart))
        return Fail(cx, d, "expected func section byte size");

    const DeclaredSig& sig = mg.funcSig(funcIndex);
    for (ValType type : sig.args()) {
        if (!fg.addLocal(type))
            return false;
    }

    uint32_t numVars;
    if (!d.readVarU32(&numVars))
        return Fail(cx, d, "expected number of local vars");

    for (uint32_t i = 0; i < numVars; i++) {
        ValType type;
        if (!DecodeValType(cx, d, &type))
            return false;
        if (!fg.addLocal(type))
            return false;
    }

    if (!DecodeFuncBody(cx, d, mg, fg, funcIndex))
        return false;

    if (!d.finishSection(sectionStart))
        return Fail(cx, d, "func section byte size mismatch");

    int64_t after = PRMJ_Now();
    unsigned generateTime = (after - before) / PRMJ_USEC_PER_MSEC;

    return mg.finishFuncDef(funcIndex, generateTime, &fg);
}
Example #12
// Generate a stub that is called from the synchronous, inline interrupt checks
// when the interrupt flag is set. This stub calls the C++ function to handle
// the interrupt which returns whether execution has been interrupted.
static bool
GenerateSyncInterruptStub(ModuleGenerator& mg, Label* throwLabel)
{
    MacroAssembler& masm = mg.masm();

    masm.setFramePushed(0);
    unsigned framePushed = StackDecrementForCall(masm, ABIStackAlignment, ShadowStackSpace);

    ProfilingOffsets offsets;
    GenerateExitPrologue(masm, framePushed, ExitReason::Native, &offsets,
                         masm.asmSyncInterruptLabel());

    AssertStackAlignment(masm, ABIStackAlignment);
    masm.call(SymbolicAddress::HandleExecutionInterrupt);
    masm.branchIfFalseBool(ReturnReg, throwLabel);

    GenerateExitEpilogue(masm, framePushed, ExitReason::Native, &offsets);

    if (masm.oom())
        return false;

    offsets.end = masm.currentOffset();
    return mg.defineSyncInterruptStub(offsets);
}
Example #13
static bool
DecodeCodeSection(JSContext* cx, Decoder& d, ModuleGenerator& mg)
{
    if (!mg.startFuncDefs())
        return false;

    uint32_t funcIndex = 0;
    while (d.readCStringIf(CodeSection)) {
        uint32_t sectionStart;
        if (!d.startSection(&sectionStart))
            return Fail(cx, d, "expected code section byte size");

        uint32_t numFuncs;
        if (!d.readVarU32(&numFuncs))
            return Fail(cx, d, "expected number of functions");

        if (funcIndex + numFuncs > mg.numFuncSigs())
            return Fail(cx, d, "more function definitions than declarations");

        for (uint32_t i = 0; i < numFuncs; i++) {
            if (!DecodeFunc(cx, d, mg, funcIndex++))
                return false;
        }

        if (!d.finishSection(sectionStart))
            return Fail(cx, d, "code section byte size mismatch");
    }

    if (funcIndex != mg.numFuncSigs())
        return Fail(cx, d, "fewer function definitions than declarations");

    if (!mg.finishFuncDefs())
        return false;

    return true;
}
Example #14
static bool
DecodeFunctionSections(JSContext* cx, Decoder& d, ModuleGenerator& mg)
{
    if (!mg.startFuncDefs())
        return false;

    uint32_t funcIndex = 0;

    for (; d.readCStringIf(FuncLabel); funcIndex++) {
        if (funcIndex >= mg.numFuncSigs())
            return Fail(cx, d, "more function definitions than declarations");

        if (!DecodeFunctionSection(cx, d, mg, funcIndex))
            return false;
    }

    if (funcIndex < mg.numFuncSigs())
        return Fail(cx, d, "fewer function definitions than declarations");

    if (!mg.finishFuncDefs())
        return false;

    return true;
}
Example #15
bool
wasm::GenerateStubs(ModuleGenerator& mg, bool usesHeap)
{
    for (unsigned i = 0; i < mg.numExports(); i++) {
        if (!GenerateEntry(mg, i, usesHeap))
            return false;
    }

    Label onThrow;

    for (size_t i = 0; i < mg.numImports(); i++) {
        ProfilingOffsets interp;
        if (!GenerateInterpExitStub(mg, i, &onThrow, &interp))
            return false;

        ProfilingOffsets jit;
        if (!GenerateJitExitStub(mg, i, usesHeap, &onThrow, &jit))
            return false;

        if (!mg.defineImport(i, interp, jit))
            return false;
    }

    if (mg.masm().asmStackOverflowLabel()->used()) {
        if (!GenerateStackOverflowStub(mg, &onThrow))
            return false;
    }

    if (mg.masm().asmSyncInterruptLabel()->used()) {
        if (!GenerateSyncInterruptStub(mg, &onThrow))
            return false;
    }

    if (mg.masm().asmOnConversionErrorLabel()->used()) {
        if (!GenerateConversionErrorStub(mg, &onThrow))
            return false;
    }

    // Generate unconditionally: the out-of-bounds exit may be used later even
    // if signal handling isn't used for out-of-bounds at the moment.
    if (!GenerateOutOfBoundsStub(mg, &onThrow))
        return false;

    // Generate unconditionally: the async interrupt may be taken at any time.
    if (!GenerateAsyncInterruptStub(mg, &onThrow))
        return false;

    if (onThrow.used()) {
        if (!GenerateThrowStub(mg, &onThrow))
            return false;
    }

    return true;
}
Example #16
static bool
DecodeFunctionBody(DecoderT& d, ModuleGenerator& mg, uint32_t funcIndex)
{
    uint32_t bodySize;
    if (!d.readVarU32(&bodySize))
        return d.fail("expected number of function body bytes");

    if (bodySize > MaxFunctionBytes)
        return d.fail("function body too big");

    const size_t offsetInModule = d.currentOffset();

    // Skip over the function body; it will be validated by the compilation thread.
    const uint8_t* bodyBegin;
    if (!d.readBytes(bodySize, &bodyBegin))
        return d.fail("function body length too big");

    return mg.compileFuncDef(funcIndex, offsetInModule, bodyBegin, bodyBegin + bodySize);
}
Example #17
static bool
DecodeMemorySection(JSContext* cx, Decoder& d, ModuleGenerator& mg,
                    MutableHandle<ArrayBufferObject*> heap)
{
    if (!d.readCStringIf(MemoryLabel))
        return true;

    uint32_t sectionStart;
    if (!d.startSection(&sectionStart))
        return Fail(cx, d, "expected memory section byte size");

    if (!d.readCStringIf(InitialLabel))
        return Fail(cx, d, "expected memory section initial field");

    uint32_t initialHeapSize;
    if (!d.readVarU32(&initialHeapSize))
        return Fail(cx, d, "expected initial memory size");

    if (initialHeapSize < PageSize || initialHeapSize % PageSize != 0)
        return Fail(cx, d, "initial memory size not a multiple of 0x10000");

    if (initialHeapSize > INT32_MAX)
        return Fail(cx, d, "initial memory size too big");

    if (!d.readCStringIf(EndLabel))
        return Fail(cx, d, "expected end field of memory section");

    if (!d.finishSection(sectionStart))
        return Fail(cx, d, "memory section byte size mismatch");

    bool signalsForOOB = CompileArgs(cx).useSignalHandlersForOOB;
    heap.set(ArrayBufferObject::createForWasm(cx, initialHeapSize, signalsForOOB));
    if (!heap)
        return false;

    mg.initHeapUsage(HeapUsage::Unshared);
    return true;
}
Example #18
// Generate a stub that enters wasm from a C++ caller via the native ABI.
// The signature of the entry point is Module::CodePtr. The exported wasm
// function has an ABI derived from its specific signature, so this function
// must map from the ABI of CodePtr to the export's signature's ABI.
static bool
GenerateEntry(ModuleGenerator& mg, unsigned exportIndex, bool usesHeap)
{
    MacroAssembler& masm = mg.masm();
    const Sig& sig = mg.exportSig(exportIndex);

    masm.haltingAlign(CodeAlignment);

    Offsets offsets;
    offsets.begin = masm.currentOffset();

    // Save the return address if it wasn't already saved by the call insn.
#if defined(JS_CODEGEN_ARM)
    masm.push(lr);
#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
    masm.push(ra);
#elif defined(JS_CODEGEN_X86)
    static const unsigned EntryFrameSize = sizeof(void*);
#endif

    // Save all caller non-volatile registers before we clobber them here and in
    // the asm.js callee (which does not preserve non-volatile registers).
    masm.setFramePushed(0);
    masm.PushRegsInMask(NonVolatileRegs);
    MOZ_ASSERT(masm.framePushed() == FramePushedAfterSave);

    // ARM and MIPS/MIPS64 have a globally-pinned GlobalReg (x64 uses RIP-relative
    // addressing, x86 uses immediates in effective addresses). For the
    // AsmJSGlobalRegBias addition, see Assembler-(mips,arm).h.
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
    masm.movePtr(IntArgReg1, GlobalReg);
    masm.addPtr(Imm32(AsmJSGlobalRegBias), GlobalReg);
#endif

    // ARM, MIPS/MIPS64 and x64 have a globally-pinned HeapReg (x86 uses immediates in
    // effective addresses). Loading the heap register depends on the global
    // register already having been loaded.
    if (usesHeap)
        masm.loadAsmJSHeapRegisterFromGlobalData();

    // Put the 'argv' argument into a non-argument/return register so that we
    // can use 'argv' while we fill in the arguments for the asm.js callee.
    // Also, save 'argv' on the stack so that we can recover it after the call.
    // Use a second non-argument/return register as temporary scratch.
    Register argv = ABIArgGenerator::NonArgReturnReg0;
    Register scratch = ABIArgGenerator::NonArgReturnReg1;
#if defined(JS_CODEGEN_X86)
    masm.loadPtr(Address(masm.getStackPointer(), EntryFrameSize + masm.framePushed()), argv);
#else
    masm.movePtr(IntArgReg0, argv);
#endif
    masm.Push(argv);

    // Save the stack pointer to the saved non-volatile registers. We will use
    // this on two paths: normal return and exceptional return. Since
    // loadWasmActivation uses GlobalReg, we must do this after loading
    // GlobalReg.
    MOZ_ASSERT(masm.framePushed() == FramePushedForEntrySP);
    masm.loadWasmActivation(scratch);
    masm.storeStackPtr(Address(scratch, WasmActivation::offsetOfEntrySP()));

    // Dynamically align the stack since ABIStackAlignment is not necessarily
    // AsmJSStackAlignment. We'll use entrySP to recover the original stack
    // pointer on return.
    masm.andToStackPtr(Imm32(~(AsmJSStackAlignment - 1)));
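
    // (Arithmetic note: with AsmJSStackAlignment a power of two, masking with
    // ~(AsmJSStackAlignment - 1) clears the low bits and rounds sp down to the
    // next multiple of AsmJSStackAlignment; e.g. with 16-byte alignment,
    // 0x1003c & ~0xf == 0x10030.)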

    // Bump the stack for the call.
    masm.reserveStack(AlignBytes(StackArgBytes(sig.args()), AsmJSStackAlignment));

    // Copy parameters out of argv and into the registers/stack-slots specified by
    // the system ABI.
    for (ABIArgValTypeIter iter(sig.args()); !iter.done(); iter++) {
        unsigned argOffset = iter.index() * Module::SizeOfEntryArg;
        Address src(argv, argOffset);
        MIRType type = iter.mirType();
        switch (iter->kind()) {
          case ABIArg::GPR:
            masm.load32(src, iter->gpr());
            break;
#ifdef JS_CODEGEN_REGISTER_PAIR
          case ABIArg::GPR_PAIR:
            MOZ_CRASH("wasm uses hardfp for function calls.");
            break;
#endif
          case ABIArg::FPU: {
            static_assert(Module::SizeOfEntryArg >= jit::Simd128DataSize,
                          "EntryArg must be big enough to store SIMD values");
            switch (type) {
              case MIRType_Int32x4:
              case MIRType_Bool32x4:
                masm.loadUnalignedInt32x4(src, iter->fpu());
                break;
              case MIRType_Float32x4:
                masm.loadUnalignedFloat32x4(src, iter->fpu());
                break;
              case MIRType_Double:
                masm.loadDouble(src, iter->fpu());
                break;
              case MIRType_Float32:
                masm.loadFloat32(src, iter->fpu());
                break;
              default:
                MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("unexpected FPU type");
                break;
            }
            break;
          }
          case ABIArg::Stack:
            switch (type) {
              case MIRType_Int32:
                masm.load32(src, scratch);
                masm.storePtr(scratch, Address(masm.getStackPointer(), iter->offsetFromArgBase()));
                break;
              case MIRType_Double:
                masm.loadDouble(src, ScratchDoubleReg);
                masm.storeDouble(ScratchDoubleReg, Address(masm.getStackPointer(), iter->offsetFromArgBase()));
                break;
              case MIRType_Float32:
                masm.loadFloat32(src, ScratchFloat32Reg);
                masm.storeFloat32(ScratchFloat32Reg, Address(masm.getStackPointer(), iter->offsetFromArgBase()));
                break;
              case MIRType_Int32x4:
              case MIRType_Bool32x4:
                masm.loadUnalignedInt32x4(src, ScratchSimd128Reg);
                masm.storeAlignedInt32x4(ScratchSimd128Reg,
                                         Address(masm.getStackPointer(), iter->offsetFromArgBase()));
                break;
              case MIRType_Float32x4:
                masm.loadUnalignedFloat32x4(src, ScratchSimd128Reg);
                masm.storeAlignedFloat32x4(ScratchSimd128Reg,
                                           Address(masm.getStackPointer(), iter->offsetFromArgBase()));
                break;
              default:
                MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("unexpected stack arg type");
            }
            break;
        }
    }

    // Call into the real function.
    masm.assertStackAlignment(AsmJSStackAlignment);
    Label target;
    target.bind(mg.funcEntryOffsets()[mg.exportFuncIndex(exportIndex)]);
    masm.call(CallSiteDesc(CallSiteDesc::Relative), &target);

    // Recover the stack pointer value before dynamic alignment.
    masm.loadWasmActivation(scratch);
    masm.loadStackPtr(Address(scratch, WasmActivation::offsetOfEntrySP()));
    masm.setFramePushed(FramePushedForEntrySP);

    // Recover the 'argv' pointer which was saved before aligning the stack.
    masm.Pop(argv);

    // Store the return value in argv[0]
    switch (sig.ret()) {
      case ExprType::Void:
        break;
      case ExprType::I32:
        masm.storeValue(JSVAL_TYPE_INT32, ReturnReg, Address(argv, 0));
        break;
      case ExprType::I64:
        MOZ_CRASH("no int64 in asm.js");
      case ExprType::F32:
        masm.convertFloat32ToDouble(ReturnFloat32Reg, ReturnDoubleReg);
        MOZ_FALLTHROUGH; // as ReturnDoubleReg now contains a Double
      case ExprType::F64:
        masm.canonicalizeDouble(ReturnDoubleReg);
        masm.storeDouble(ReturnDoubleReg, Address(argv, 0));
        break;
      case ExprType::I32x4:
      case ExprType::B32x4:
        // We don't have control over argv's alignment, so do an unaligned access.
        masm.storeUnalignedInt32x4(ReturnSimd128Reg, Address(argv, 0));
        break;
      case ExprType::F32x4:
        // We don't have control over argv's alignment, so do an unaligned access.
        masm.storeUnalignedFloat32x4(ReturnSimd128Reg, Address(argv, 0));
        break;
    }

    // Restore clobbered non-volatile registers of the caller.
    masm.PopRegsInMask(NonVolatileRegs);
    MOZ_ASSERT(masm.framePushed() == 0);

    masm.move32(Imm32(true), ReturnReg);
    masm.ret();

    if (masm.oom())
        return false;

    offsets.end = masm.currentOffset();
    return mg.defineExport(exportIndex, offsets);
}
Example #19
// The async interrupt-callback exit is called from arbitrarily-interrupted asm.js
// code. That means we must first save *all* registers and restore *all*
// registers (except the stack pointer) when we resume. The address to resume to
// (assuming that js::HandleExecutionInterrupt doesn't indicate that the
// execution should be aborted) is stored in WasmActivation::resumePC_.
// Unfortunately, loading this requires a scratch register which we don't have
// after restoring all registers. To hack around this, push the resumePC on the
// stack so that it can be popped directly into PC.
static bool
GenerateAsyncInterruptStub(ModuleGenerator& mg, Label* throwLabel)
{
    MacroAssembler& masm = mg.masm();

    masm.haltingAlign(CodeAlignment);
    Offsets offsets;
    offsets.begin = masm.currentOffset();

#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
    // Be very careful here not to perturb the machine state before saving it
    // to the stack. In particular, add/sub instructions may set conditions in
    // the flags register.
    masm.push(Imm32(0));            // space for resumePC
    masm.pushFlags();               // after this we are safe to use sub
    masm.setFramePushed(0);         // set to zero so we can use masm.framePushed() below
    masm.PushRegsInMask(AllRegsExceptSP); // save all GP/FP registers (except SP)

    Register scratch = ABIArgGenerator::NonArgReturnReg0;

    // Store resumePC into the reserved space.
    masm.loadWasmActivation(scratch);
    masm.loadPtr(Address(scratch, WasmActivation::offsetOfResumePC()), scratch);
    masm.storePtr(scratch, Address(masm.getStackPointer(), masm.framePushed() + sizeof(void*)));

    // We know that StackPointer is word-aligned, but not necessarily
    // stack-aligned, so we need to align it dynamically.
    masm.moveStackPtrTo(ABIArgGenerator::NonVolatileReg);
    masm.andToStackPtr(Imm32(~(ABIStackAlignment - 1)));
    if (ShadowStackSpace)
        masm.subFromStackPtr(Imm32(ShadowStackSpace));

    masm.assertStackAlignment(ABIStackAlignment);
    masm.call(SymbolicAddress::HandleExecutionInterrupt);

    masm.branchIfFalseBool(ReturnReg, throwLabel);

    // Restore the StackPointer to its position before the call.
    masm.moveToStackPtr(ABIArgGenerator::NonVolatileReg);

    // Restore the machine state to before the interrupt.
    masm.PopRegsInMask(AllRegsExceptSP); // restore all GP/FP registers (except SP)
    masm.popFlags();              // after this, nothing that sets conditions
    masm.ret();                   // pop resumePC into PC
#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
    // Reserve space to store resumePC.
    masm.subFromStackPtr(Imm32(sizeof(intptr_t)));
    // Set framePushed to zero so we can use masm.framePushed() below.
    masm.setFramePushed(0);
    // When this platform supports SIMD extensions, we'll need to push high lanes
    // of SIMD registers as well.
    JS_STATIC_ASSERT(!SupportsSimd);
    // Save all registers, except sp. After this the stack is aligned.
    masm.PushRegsInMask(AllRegsExceptSP);

    // Save the stack pointer in a non-volatile register.
    masm.moveStackPtrTo(s0);
    // Align the stack.
    masm.ma_and(StackPointer, StackPointer, Imm32(~(ABIStackAlignment - 1)));

    // Store resumePC into the reserved space.
    masm.loadWasmActivation(IntArgReg0);
    masm.loadPtr(Address(IntArgReg0, WasmActivation::offsetOfResumePC()), IntArgReg1);
    masm.storePtr(IntArgReg1, Address(s0, masm.framePushed()));

    // The MIPS ABI requires reserving stack space for registers $a0 to $a3.
    masm.subFromStackPtr(Imm32(4 * sizeof(intptr_t)));

    masm.assertStackAlignment(ABIStackAlignment);
    masm.call(SymbolicAddress::HandleExecutionInterrupt);

    masm.addToStackPtr(Imm32(4 * sizeof(intptr_t)));

    masm.branchIfFalseBool(ReturnReg, throwLabel);

    // This will restore stack to the address before the call.
    masm.moveToStackPtr(s0);
    masm.PopRegsInMask(AllRegsExceptSP);

    // Pop resumePC into PC. Clobber HeapReg to make the jump and restore it
    // in the jump delay slot.
    masm.pop(HeapReg);
    masm.as_jr(HeapReg);
    masm.loadAsmJSHeapRegisterFromGlobalData();
#elif defined(JS_CODEGEN_ARM)
    masm.setFramePushed(0);         // set to zero so we can use masm.framePushed() below

    // Save all GPRs, except the stack pointer.
    masm.PushRegsInMask(LiveRegisterSet(
                            GeneralRegisterSet(Registers::AllMask & ~(1<<Registers::sp)),
                            FloatRegisterSet(uint32_t(0))));

    // Save both the APSR and FPSCR in non-volatile registers.
    masm.as_mrs(r4);
    masm.as_vmrs(r5);
    // Save the stack pointer in a non-volatile register.
    masm.mov(sp, r6);
    // Align the stack.
    masm.ma_and(Imm32(~7), sp, sp);

    // Store resumePC into the return PC stack slot.
    masm.loadWasmActivation(IntArgReg0);
    masm.loadPtr(Address(IntArgReg0, WasmActivation::offsetOfResumePC()), IntArgReg1);
    masm.storePtr(IntArgReg1, Address(r6, 14 * sizeof(uint32_t*)));

    // When this platform supports SIMD extensions, we'll need to push and pop
    // high lanes of SIMD registers as well.

    // Save all FP registers
    JS_STATIC_ASSERT(!SupportsSimd);
    masm.PushRegsInMask(LiveRegisterSet(GeneralRegisterSet(0),
                                        FloatRegisterSet(FloatRegisters::AllDoubleMask)));

    masm.assertStackAlignment(ABIStackAlignment);
    masm.call(SymbolicAddress::HandleExecutionInterrupt);

    masm.branchIfFalseBool(ReturnReg, throwLabel);

    // Restore the machine state to before the interrupt. This will set the pc!

    // Restore all FP registers
    masm.PopRegsInMask(LiveRegisterSet(GeneralRegisterSet(0),
                                       FloatRegisterSet(FloatRegisters::AllDoubleMask)));
    masm.mov(r6, sp);
    masm.as_vmsr(r5);
    masm.as_msr(r4);
    // Restore all GP registers
    masm.startDataTransferM(IsLoad, sp, IA, WriteBack);
    masm.transferReg(r0);
    masm.transferReg(r1);
    masm.transferReg(r2);
    masm.transferReg(r3);
    masm.transferReg(r4);
    masm.transferReg(r5);
    masm.transferReg(r6);
    masm.transferReg(r7);
    masm.transferReg(r8);
    masm.transferReg(r9);
    masm.transferReg(r10);
    masm.transferReg(r11);
    masm.transferReg(r12);
    masm.transferReg(lr);
    masm.finishDataTransfer();
    masm.ret();
#elif defined(JS_CODEGEN_ARM64)
    MOZ_CRASH();
#elif defined (JS_CODEGEN_NONE)
    MOZ_CRASH();
#else
# error "Unknown architecture!"
#endif

    if (masm.oom())
        return false;

    offsets.end = masm.currentOffset();
    return mg.defineAsyncInterruptStub(offsets);
}
Example #20
// Generate a stub that is called via the internal ABI derived from the
// signature of the import and calls into an appropriate InvokeImport C++
// function, having boxed all the ABI arguments into a homogeneous Value array.
static bool
GenerateInterpExitStub(ModuleGenerator& mg, unsigned importIndex, Label* throwLabel,
                       ProfilingOffsets* offsets)
{
    MacroAssembler& masm = mg.masm();
    const Sig& sig = *mg.import(importIndex).sig;

    masm.setFramePushed(0);

    // Argument types for InvokeImport_*:
    static const MIRType typeArray[] = { MIRType_Pointer,   // ImportExit
                                         MIRType_Int32,     // argc
                                         MIRType_Pointer }; // argv
    MIRTypeVector invokeArgTypes;
    MOZ_ALWAYS_TRUE(invokeArgTypes.append(typeArray, ArrayLength(typeArray)));

    // At the point of the call, the stack layout shall be (sp grows to the left):
    //   | stack args | padding | Value argv[] | padding | retaddr | caller stack args |
    // The padding between stack args and argv ensures that argv is aligned. The
    // padding between argv and retaddr ensures that sp is aligned.
    unsigned argOffset = AlignBytes(StackArgBytes(invokeArgTypes), sizeof(double));
    unsigned argBytes = Max<size_t>(1, sig.args().length()) * sizeof(Value);
    unsigned framePushed = StackDecrementForCall(masm, ABIStackAlignment, argOffset + argBytes);
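
    // (Note: argBytes reserves at least one Value slot even for nullary
    // signatures because the return value is read back out of argv[0] after
    // the call; see the unboxInt32/loadDouble uses of 'argv' below.)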

    GenerateExitPrologue(masm, framePushed, ExitReason::ImportInterp, offsets);

    // Fill the argument array.
    unsigned offsetToCallerStackArgs = sizeof(AsmJSFrame) + masm.framePushed();
    Register scratch = ABIArgGenerator::NonArgReturnReg0;
    FillArgumentArray(masm, sig.args(), argOffset, offsetToCallerStackArgs, scratch);

    // Prepare the arguments for the call to InvokeImport_*.
    ABIArgMIRTypeIter i(invokeArgTypes);

    // argument 0: importIndex
    if (i->kind() == ABIArg::GPR)
        masm.mov(ImmWord(importIndex), i->gpr());
    else
        masm.store32(Imm32(importIndex), Address(masm.getStackPointer(), i->offsetFromArgBase()));
    i++;

    // argument 1: argc
    unsigned argc = sig.args().length();
    if (i->kind() == ABIArg::GPR)
        masm.mov(ImmWord(argc), i->gpr());
    else
        masm.store32(Imm32(argc), Address(masm.getStackPointer(), i->offsetFromArgBase()));
    i++;

    // argument 2: argv
    Address argv(masm.getStackPointer(), argOffset);
    if (i->kind() == ABIArg::GPR) {
        masm.computeEffectiveAddress(argv, i->gpr());
    } else {
        masm.computeEffectiveAddress(argv, scratch);
        masm.storePtr(scratch, Address(masm.getStackPointer(), i->offsetFromArgBase()));
    }
    i++;
    MOZ_ASSERT(i.done());

    // Make the call, test whether it succeeded, and extract the return value.
    AssertStackAlignment(masm, ABIStackAlignment);
    switch (sig.ret()) {
      case ExprType::Void:
        masm.call(SymbolicAddress::InvokeImport_Void);
        masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
        break;
      case ExprType::I32:
        masm.call(SymbolicAddress::InvokeImport_I32);
        masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
        masm.unboxInt32(argv, ReturnReg);
        break;
      case ExprType::I64:
        MOZ_CRASH("no int64 in asm.js");
      case ExprType::F32:
        MOZ_CRASH("Float32 shouldn't be returned from a FFI");
      case ExprType::F64:
        masm.call(SymbolicAddress::InvokeImport_F64);
        masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
        masm.loadDouble(argv, ReturnDoubleReg);
        break;
      case ExprType::I32x4:
      case ExprType::F32x4:
      case ExprType::B32x4:
        MOZ_CRASH("SIMD types shouldn't be returned from a FFI");
    }

    GenerateExitEpilogue(masm, framePushed, ExitReason::ImportInterp, offsets);

    if (masm.oom())
        return false;

    offsets->end = masm.currentOffset();
    return true;
}
Example #21
// Generate a stub that is called via the internal ABI derived from the
// signature of the import and calls into a compatible JIT function,
// having boxed all the ABI arguments into the JIT stack frame layout.
static bool
GenerateJitExitStub(ModuleGenerator& mg, unsigned importIndex, bool usesHeap,
                    Label* throwLabel, ProfilingOffsets* offsets)
{
    MacroAssembler& masm = mg.masm();
    const Sig& sig = *mg.import(importIndex).sig;

    masm.setFramePushed(0);

    // JIT calls use the following stack layout (sp grows to the left):
    //   | retaddr | descriptor | callee | argc | this | arg1..N |
    // After the JIT frame, the global register (if present) is saved since the
    // JIT's ABI does not preserve non-volatile regs. Also, unlike most ABIs,
    // the JIT ABI requires that sp be JitStackAlignment-aligned *after* pushing
    // the return address.
    static_assert(AsmJSStackAlignment >= JitStackAlignment, "subsumes");
    unsigned sizeOfRetAddr = sizeof(void*);
    unsigned jitFrameBytes = 3 * sizeof(void*) + (1 + sig.args().length()) * sizeof(Value);
    unsigned totalJitFrameBytes = sizeOfRetAddr + jitFrameBytes + MaybeSavedGlobalReg;
    unsigned jitFramePushed = StackDecrementForCall(masm, JitStackAlignment, totalJitFrameBytes) -
                              sizeOfRetAddr;
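
    // (Arithmetic note, assuming StackDecrementForCall aligns the full frame
    // including the return-address slot: subtracting sizeOfRetAddr afterwards
    // leaves sp such that sp + sizeof(void*) is JitStackAlignment-aligned,
    // which is exactly the invariant described in the comment above.)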

    GenerateExitPrologue(masm, jitFramePushed, ExitReason::ImportJit, offsets);

    // 1. Descriptor
    size_t argOffset = 0;
    uint32_t descriptor = MakeFrameDescriptor(jitFramePushed, JitFrame_Entry);
    masm.storePtr(ImmWord(uintptr_t(descriptor)), Address(masm.getStackPointer(), argOffset));
    argOffset += sizeof(size_t);

    // 2. Callee
    Register callee = ABIArgGenerator::NonArgReturnReg0;   // live until call
    Register scratch = ABIArgGenerator::NonArgReturnReg1;  // repeatedly clobbered

    // 2.1. Get ExitDatum
    unsigned globalDataOffset = mg.import(importIndex).globalDataOffset;
#if defined(JS_CODEGEN_X64)
    masm.append(AsmJSGlobalAccess(masm.leaRipRelative(callee), globalDataOffset));
#elif defined(JS_CODEGEN_X86)
    masm.append(AsmJSGlobalAccess(masm.movlWithPatch(Imm32(0), callee), globalDataOffset));
#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
      defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
    masm.computeEffectiveAddress(Address(GlobalReg, globalDataOffset - AsmJSGlobalRegBias), callee);
#endif

    // 2.2. Get callee
    masm.loadPtr(Address(callee, Module::OffsetOfImportExitFun), callee);

    // 2.3. Save callee
    masm.storePtr(callee, Address(masm.getStackPointer(), argOffset));
    argOffset += sizeof(size_t);

    // 2.4. Load callee executable entry point
    masm.loadPtr(Address(callee, JSFunction::offsetOfNativeOrScript()), callee);
    masm.loadBaselineOrIonNoArgCheck(callee, callee, nullptr);

    // 3. Argc
    unsigned argc = sig.args().length();
    masm.storePtr(ImmWord(uintptr_t(argc)), Address(masm.getStackPointer(), argOffset));
    argOffset += sizeof(size_t);

    // 4. |this| value
    masm.storeValue(UndefinedValue(), Address(masm.getStackPointer(), argOffset));
    argOffset += sizeof(Value);

    // 5. Fill the arguments
    unsigned offsetToCallerStackArgs = jitFramePushed + sizeof(AsmJSFrame);
    FillArgumentArray(masm, sig.args(), argOffset, offsetToCallerStackArgs, scratch);
    argOffset += sig.args().length() * sizeof(Value);
    MOZ_ASSERT(argOffset == jitFrameBytes);

    // 6. Jit code will clobber all registers, even non-volatiles. GlobalReg and
    //    HeapReg are removed from the general register set for asm.js code, so
    //    these will not have been saved by the caller like all other registers,
    //    so they must be explicitly preserved. Only save GlobalReg since
    //    HeapReg can be reloaded (from global data) after the call.
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
    static_assert(MaybeSavedGlobalReg == sizeof(void*), "stack frame accounting");
    masm.storePtr(GlobalReg, Address(masm.getStackPointer(), jitFrameBytes));
#endif

    {
        // Enable Activation.
        //
        // This sequence requires four registers, and needs to preserve the 'callee'
        // register, so there are five live registers.
        MOZ_ASSERT(callee == AsmJSIonExitRegCallee);
        Register reg0 = AsmJSIonExitRegE0;
        Register reg1 = AsmJSIonExitRegE1;
        Register reg2 = AsmJSIonExitRegE2;
        Register reg3 = AsmJSIonExitRegE3;

        // The following is inlined:
        //   JSContext* cx = activation->cx();
        //   Activation* act = cx->runtime()->activation();
        //   act.active_ = true;
        //   act.prevJitTop_ = cx->runtime()->jitTop;
        //   act.prevJitJSContext_ = cx->runtime()->jitJSContext;
        //   cx->runtime()->jitJSContext = cx;
        //   act.prevJitActivation_ = cx->runtime()->jitActivation;
        //   cx->runtime()->jitActivation = act;
        //   act.prevProfilingActivation_ = cx->runtime()->profilingActivation;
        //   cx->runtime()->profilingActivation_ = act;
        // On ARM, store8() uses the secondScratchReg (lr) as a temp.
        size_t offsetOfActivation = JSRuntime::offsetOfActivation();
        size_t offsetOfJitTop = offsetof(JSRuntime, jitTop);
        size_t offsetOfJitJSContext = offsetof(JSRuntime, jitJSContext);
        size_t offsetOfJitActivation = offsetof(JSRuntime, jitActivation);
        size_t offsetOfProfilingActivation = JSRuntime::offsetOfProfilingActivation();
        masm.loadWasmActivation(reg0);
        masm.loadPtr(Address(reg0, WasmActivation::offsetOfContext()), reg3);
        masm.loadPtr(Address(reg3, JSContext::offsetOfRuntime()), reg0);
        masm.loadPtr(Address(reg0, offsetOfActivation), reg1);

        //   act.active_ = true;
        masm.store8(Imm32(1), Address(reg1, JitActivation::offsetOfActiveUint8()));

        //   act.prevJitTop_ = cx->runtime()->jitTop;
        masm.loadPtr(Address(reg0, offsetOfJitTop), reg2);
        masm.storePtr(reg2, Address(reg1, JitActivation::offsetOfPrevJitTop()));

        //   act.prevJitJSContext_ = cx->runtime()->jitJSContext;
        masm.loadPtr(Address(reg0, offsetOfJitJSContext), reg2);
        masm.storePtr(reg2, Address(reg1, JitActivation::offsetOfPrevJitJSContext()));
        //   cx->runtime()->jitJSContext = cx;
        masm.storePtr(reg3, Address(reg0, offsetOfJitJSContext));

        //   act.prevJitActivation_ = cx->runtime()->jitActivation;
        masm.loadPtr(Address(reg0, offsetOfJitActivation), reg2);
        masm.storePtr(reg2, Address(reg1, JitActivation::offsetOfPrevJitActivation()));
        //   cx->runtime()->jitActivation = act;
        masm.storePtr(reg1, Address(reg0, offsetOfJitActivation));

        //   act.prevProfilingActivation_ = cx->runtime()->profilingActivation;
        masm.loadPtr(Address(reg0, offsetOfProfilingActivation), reg2);
        masm.storePtr(reg2, Address(reg1, Activation::offsetOfPrevProfiling()));
        //   cx->runtime()->profilingActivation_ = act;
        masm.storePtr(reg1, Address(reg0, offsetOfProfilingActivation));
    }

    AssertStackAlignment(masm, JitStackAlignment, sizeOfRetAddr);
    masm.callJitNoProfiler(callee);
    AssertStackAlignment(masm, JitStackAlignment, sizeOfRetAddr);

    {
        // Disable Activation.
        //
        // This sequence needs three registers, and must preserve the JSReturnReg_Data and
        // JSReturnReg_Type, so there are five live registers.
        MOZ_ASSERT(JSReturnReg_Data == AsmJSIonExitRegReturnData);
        MOZ_ASSERT(JSReturnReg_Type == AsmJSIonExitRegReturnType);
        Register reg0 = AsmJSIonExitRegD0;
        Register reg1 = AsmJSIonExitRegD1;
        Register reg2 = AsmJSIonExitRegD2;

        // The following is inlined:
        //   rt->profilingActivation = prevProfilingActivation_;
        //   rt->activation()->active_ = false;
        //   rt->jitTop = prevJitTop_;
        //   rt->jitJSContext = prevJitJSContext_;
        //   rt->jitActivation = prevJitActivation_;
        // On ARM, store8() uses the secondScratchReg (lr) as a temp.
        size_t offsetOfActivation = JSRuntime::offsetOfActivation();
        size_t offsetOfJitTop = offsetof(JSRuntime, jitTop);
        size_t offsetOfJitJSContext = offsetof(JSRuntime, jitJSContext);
        size_t offsetOfJitActivation = offsetof(JSRuntime, jitActivation);
        size_t offsetOfProfilingActivation = JSRuntime::offsetOfProfilingActivation();

        masm.movePtr(SymbolicAddress::Runtime, reg0);
        masm.loadPtr(Address(reg0, offsetOfActivation), reg1);

        //   rt->jitTop = prevJitTop_;
        masm.loadPtr(Address(reg1, JitActivation::offsetOfPrevJitTop()), reg2);
        masm.storePtr(reg2, Address(reg0, offsetOfJitTop));

        //   rt->profilingActivation = rt->activation()->prevProfiling_;
        masm.loadPtr(Address(reg1, Activation::offsetOfPrevProfiling()), reg2);
        masm.storePtr(reg2, Address(reg0, offsetOfProfilingActivation));

        //   rt->activation()->active_ = false;
        masm.store8(Imm32(0), Address(reg1, JitActivation::offsetOfActiveUint8()));

        //   rt->jitJSContext = prevJitJSContext_;
        masm.loadPtr(Address(reg1, JitActivation::offsetOfPrevJitJSContext()), reg2);
        masm.storePtr(reg2, Address(reg0, offsetOfJitJSContext));

        //   rt->jitActivation = prevJitActivation_;
        masm.loadPtr(Address(reg1, JitActivation::offsetOfPrevJitActivation()), reg2);
        masm.storePtr(reg2, Address(reg0, offsetOfJitActivation));
    }

    // Reload the global register since JIT code can clobber any register.
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
    static_assert(MaybeSavedGlobalReg == sizeof(void*), "stack frame accounting");
    masm.loadPtr(Address(masm.getStackPointer(), jitFrameBytes), GlobalReg);
#endif

    // As explained above, the frame was aligned for the JIT ABI such that
    //   (sp + sizeof(void*)) % JitStackAlignment == 0
    // But now we possibly want to call one of several different C++ functions,
    // so subtract the sizeof(void*) so that sp is aligned for an ABI call.
    static_assert(ABIStackAlignment <= JitStackAlignment, "subsumes");
    masm.reserveStack(sizeOfRetAddr);
    unsigned nativeFramePushed = masm.framePushed();
    AssertStackAlignment(masm, ABIStackAlignment);

    masm.branchTestMagic(Assembler::Equal, JSReturnOperand, throwLabel);

    Label oolConvert;
    switch (sig.ret()) {
      case ExprType::Void:
        break;
      case ExprType::I32:
        masm.convertValueToInt32(JSReturnOperand, ReturnDoubleReg, ReturnReg, &oolConvert,
                                 /* -0 check */ false);
        break;
      case ExprType::I64:
        MOZ_CRASH("no int64 in asm.js");
      case ExprType::F32:
        MOZ_CRASH("Float shouldn't be returned from an import");
      case ExprType::F64:
        masm.convertValueToDouble(JSReturnOperand, ReturnDoubleReg, &oolConvert);
        break;
      case ExprType::I32x4:
      case ExprType::F32x4:
      case ExprType::B32x4:
        MOZ_CRASH("SIMD types shouldn't be returned from an import");
    }

    Label done;
    masm.bind(&done);

    // Ion code does not respect system callee-saved register conventions so
    // reload the heap register.
    if (usesHeap)
        masm.loadAsmJSHeapRegisterFromGlobalData();

    GenerateExitEpilogue(masm, masm.framePushed(), ExitReason::ImportJit, offsets);

    if (oolConvert.used()) {
        masm.bind(&oolConvert);
        masm.setFramePushed(nativeFramePushed);

        // Coercion calls use the following stack layout (sp grows to the left):
        //   | args | padding | Value argv[1] | padding | exit AsmJSFrame |
        MIRTypeVector coerceArgTypes;
        JS_ALWAYS_TRUE(coerceArgTypes.append(MIRType_Pointer));
        unsigned offsetToCoerceArgv = AlignBytes(StackArgBytes(coerceArgTypes), sizeof(Value));
        MOZ_ASSERT(nativeFramePushed >= offsetToCoerceArgv + sizeof(Value));
        AssertStackAlignment(masm, ABIStackAlignment);

        // Store return value into argv[0]
        masm.storeValue(JSReturnOperand, Address(masm.getStackPointer(), offsetToCoerceArgv));

        // argument 0: argv
        ABIArgMIRTypeIter i(coerceArgTypes);
        Address argv(masm.getStackPointer(), offsetToCoerceArgv);
        if (i->kind() == ABIArg::GPR) {
            masm.computeEffectiveAddress(argv, i->gpr());
        } else {
            masm.computeEffectiveAddress(argv, scratch);
            masm.storePtr(scratch, Address(masm.getStackPointer(), i->offsetFromArgBase()));
        }
        i++;
        MOZ_ASSERT(i.done());

        // Call coercion function
        AssertStackAlignment(masm, ABIStackAlignment);
        switch (sig.ret()) {
          case ExprType::I32:
            masm.call(SymbolicAddress::CoerceInPlace_ToInt32);
            masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
            masm.unboxInt32(Address(masm.getStackPointer(), offsetToCoerceArgv), ReturnReg);
            break;
          case ExprType::F64:
            masm.call(SymbolicAddress::CoerceInPlace_ToNumber);
            masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
            masm.loadDouble(Address(masm.getStackPointer(), offsetToCoerceArgv), ReturnDoubleReg);
            break;
          default:
            MOZ_CRASH("Unsupported convert type");
        }

        masm.jump(&done);
        masm.setFramePushed(0);
    }

    MOZ_ASSERT(masm.framePushed() == 0);

    if (masm.oom())
        return false;

    offsets->end = masm.currentOffset();
    return true;
}