bool
ModuleGenerator::finishCodegen()
{
    uint32_t offsetInWhole = masm_.size();

    uint32_t numFuncExports = metadata_->funcExports.length();
    MOZ_ASSERT(numFuncExports == exportedFuncs_.count());

    // Generate stubs in a separate MacroAssembler since, otherwise, for modules
    // larger than the JumpImmediateRange, even local uses of Label will fail
    // due to the large absolute offsets temporarily stored by Label::bind().

    OffsetVector entries;
    ProfilingOffsetVector interpExits;
    ProfilingOffsetVector jitExits;
    EnumeratedArray<JumpTarget, JumpTarget::Limit, Offsets> jumpTargets;
    Offsets interruptExit;

    {
        TempAllocator alloc(&lifo_);
        MacroAssembler masm(MacroAssembler::AsmJSToken(), alloc);

        if (!entries.resize(numFuncExports))
            return false;
        for (uint32_t i = 0; i < numFuncExports; i++)
            entries[i] = GenerateEntry(masm, metadata_->funcExports[i]);

        if (!interpExits.resize(numFuncImports()))
            return false;
        if (!jitExits.resize(numFuncImports()))
            return false;
        for (uint32_t i = 0; i < numFuncImports(); i++) {
            interpExits[i] = GenerateInterpExit(masm, metadata_->funcImports[i], i);
            jitExits[i] = GenerateJitExit(masm, metadata_->funcImports[i]);
        }

        for (JumpTarget target : MakeEnumeratedRange(JumpTarget::Limit))
            jumpTargets[target] = GenerateJumpTarget(masm, target);

        interruptExit = GenerateInterruptStub(masm);

        if (masm.oom() || !masm_.asmMergeWith(masm))
            return false;
    }

    // Adjust each of the resulting Offsets (to account for being merged into
    // masm_) and then create code ranges for all the stubs.

    for (uint32_t i = 0; i < numFuncExports; i++) {
        entries[i].offsetBy(offsetInWhole);
        metadata_->funcExports[i].initEntryOffset(entries[i].begin);
        if (!metadata_->codeRanges.emplaceBack(CodeRange::Entry, entries[i]))
            return false;
    }

    for (uint32_t i = 0; i < numFuncImports(); i++) {
        interpExits[i].offsetBy(offsetInWhole);
        metadata_->funcImports[i].initInterpExitOffset(interpExits[i].begin);
        if (!metadata_->codeRanges.emplaceBack(CodeRange::ImportInterpExit, interpExits[i]))
            return false;

        jitExits[i].offsetBy(offsetInWhole);
        metadata_->funcImports[i].initJitExitOffset(jitExits[i].begin);
        if (!metadata_->codeRanges.emplaceBack(CodeRange::ImportJitExit, jitExits[i]))
            return false;
    }

    for (JumpTarget target : MakeEnumeratedRange(JumpTarget::Limit)) {
        jumpTargets[target].offsetBy(offsetInWhole);
        if (!metadata_->codeRanges.emplaceBack(CodeRange::Inline, jumpTargets[target]))
            return false;
    }

    interruptExit.offsetBy(offsetInWhole);
    if (!metadata_->codeRanges.emplaceBack(CodeRange::Inline, interruptExit))
        return false;

    // Fill in LinkData with the offsets of these stubs.

    linkData_.interruptOffset = interruptExit.begin;
    linkData_.outOfBoundsOffset = jumpTargets[JumpTarget::OutOfBounds].begin;
    linkData_.unalignedAccessOffset = jumpTargets[JumpTarget::UnalignedAccess].begin;
    linkData_.badIndirectCallOffset = jumpTargets[JumpTarget::BadIndirectCall].begin;

    // Only call convertOutOfRangeBranchesToThunks after all other codegen that
    // may emit new jumps to JumpTargets has finished.

    if (!convertOutOfRangeBranchesToThunks())
        return false;

    // Now that all thunks have been generated, patch all the thunks.

    for (CallThunk& callThunk : metadata_->callThunks) {
        uint32_t funcIndex = callThunk.u.funcIndex;
        callThunk.u.codeRangeIndex = funcIndexToCodeRange_[funcIndex];
        masm_.patchThunk(callThunk.offset, funcCodeRange(funcIndex).funcNonProfilingEntry());
    }

    for (JumpTarget target : MakeEnumeratedRange(JumpTarget::Limit)) {
        for (uint32_t thunkOffset : jumpThunks_[target])
            masm_.patchThunk(thunkOffset, jumpTargets[target].begin);
    }

    // Code-generation is complete!

    masm_.finish();
    return !masm_.oom();
}
bool
ModuleGenerator::finishCodegen(StaticLinkData* link)
{
    uint32_t offsetInWhole = masm_.size();

    // Generate stubs in a separate MacroAssembler since, otherwise, for modules
    // larger than the JumpImmediateRange, even local uses of Label will fail
    // due to the large absolute offsets temporarily stored by Label::bind().

    Vector<Offsets> entries(cx_);
    Vector<ProfilingOffsets> interpExits(cx_);
    Vector<ProfilingOffsets> jitExits(cx_);
    EnumeratedArray<JumpTarget, JumpTarget::Limit, Offsets> jumpTargets;
    ProfilingOffsets badIndirectCallExit;
    Offsets interruptExit;

    {
        TempAllocator alloc(&lifo_);
        MacroAssembler masm(MacroAssembler::AsmJSToken(), alloc);

        if (!entries.resize(numExports()))
            return false;
        for (uint32_t i = 0; i < numExports(); i++) {
            uint32_t target = exportMap_->exportFuncIndices[i];
            const Sig& sig = module_->exports[i].sig();
            entries[i] = GenerateEntry(masm, target, sig, usesHeap());
        }

        if (!interpExits.resize(numImports()))
            return false;
        if (!jitExits.resize(numImports()))
            return false;
        for (uint32_t i = 0; i < numImports(); i++) {
            interpExits[i] = GenerateInterpExit(masm, module_->imports[i], i);
            jitExits[i] = GenerateJitExit(masm, module_->imports[i], usesHeap());
        }

        for (JumpTarget target : MakeEnumeratedRange(JumpTarget::Limit))
            jumpTargets[target] = GenerateJumpTarget(masm, target);

        badIndirectCallExit = GenerateBadIndirectCallExit(masm);
        interruptExit = GenerateInterruptStub(masm);

        if (masm.oom() || !masm_.asmMergeWith(masm))
            return false;
    }

    // Adjust each of the resulting Offsets (to account for being merged into
    // masm_) and then create code ranges for all the stubs.

    for (uint32_t i = 0; i < numExports(); i++) {
        entries[i].offsetBy(offsetInWhole);
        module_->exports[i].initStubOffset(entries[i].begin);
        if (!module_->codeRanges.emplaceBack(CodeRange::Entry, entries[i]))
            return false;
    }

    for (uint32_t i = 0; i < numImports(); i++) {
        interpExits[i].offsetBy(offsetInWhole);
        module_->imports[i].initInterpExitOffset(interpExits[i].begin);
        if (!module_->codeRanges.emplaceBack(CodeRange::ImportInterpExit, interpExits[i]))
            return false;

        jitExits[i].offsetBy(offsetInWhole);
        module_->imports[i].initJitExitOffset(jitExits[i].begin);
        if (!module_->codeRanges.emplaceBack(CodeRange::ImportJitExit, jitExits[i]))
            return false;
    }

    for (JumpTarget target : MakeEnumeratedRange(JumpTarget::Limit)) {
        jumpTargets[target].offsetBy(offsetInWhole);
        if (!module_->codeRanges.emplaceBack(CodeRange::Inline, jumpTargets[target]))
            return false;
    }

    badIndirectCallExit.offsetBy(offsetInWhole);
    if (!module_->codeRanges.emplaceBack(CodeRange::ErrorExit, badIndirectCallExit))
        return false;

    interruptExit.offsetBy(offsetInWhole);
    if (!module_->codeRanges.emplaceBack(CodeRange::Inline, interruptExit))
        return false;

    // Fill in StaticLinkData with the offsets of these stubs.

    link->pod.outOfBoundsOffset = jumpTargets[JumpTarget::OutOfBounds].begin;
    link->pod.interruptOffset = interruptExit.begin;

    for (uint32_t sigIndex = 0; sigIndex < numSigs_; sigIndex++) {
        const TableModuleGeneratorData& table = shared_->sigToTable[sigIndex];
        if (table.elemFuncIndices.empty())
            continue;

        Uint32Vector elemOffsets;
        if (!elemOffsets.resize(table.elemFuncIndices.length()))
            return false;

        for (size_t i = 0; i < table.elemFuncIndices.length(); i++) {
            uint32_t funcIndex = table.elemFuncIndices[i];
            if (funcIndex == BadIndirectCall)
                elemOffsets[i] = badIndirectCallExit.begin;
            else
                elemOffsets[i] = funcEntry(funcIndex);
        }

        if (!link->funcPtrTables.emplaceBack(table.globalDataOffset, Move(elemOffsets)))
            return false;
    }

    // Only call convertOutOfRangeBranchesToThunks after all other codegen that
    // may emit new jumps to JumpTargets has finished.

    if (!convertOutOfRangeBranchesToThunks())
        return false;

    // Now that all thunks have been generated, patch all the thunks.

    for (CallThunk& callThunk : module_->callThunks) {
        uint32_t funcIndex = callThunk.u.funcIndex;
        callThunk.u.codeRangeIndex = funcIndexToCodeRange_[funcIndex];
        masm_.patchThunk(callThunk.offset, funcEntry(funcIndex));
    }

    for (JumpTarget target : MakeEnumeratedRange(JumpTarget::Limit)) {
        for (uint32_t thunkOffset : jumpThunks_[target])
            masm_.patchThunk(thunkOffset, jumpTargets[target].begin);
    }

    // Code-generation is complete!

    masm_.finish();
    return !masm_.oom();
}
bool
ModuleGenerator::finishCodegen()
{
    masm_.haltingAlign(CodeAlignment);

    uint32_t offsetInWhole = masm_.size();

    uint32_t numFuncExports = metadata_->funcExports.length();
    MOZ_ASSERT(numFuncExports == exportedFuncs_.count());

    // Generate stubs in a separate MacroAssembler since, otherwise, for modules
    // larger than the JumpImmediateRange, even local uses of Label will fail
    // due to the large absolute offsets temporarily stored by Label::bind().

    OffsetVector entries;
    ProfilingOffsetVector interpExits;
    ProfilingOffsetVector jitExits;
    TrapExitOffsetArray trapExits;
    Offsets outOfBoundsExit;
    Offsets unalignedAccessExit;
    Offsets interruptExit;
    Offsets throwStub;

    {
        TempAllocator alloc(&lifo_);
        MacroAssembler masm(MacroAssembler::WasmToken(), alloc);
        Label throwLabel;

        if (!entries.resize(numFuncExports))
            return false;
        for (uint32_t i = 0; i < numFuncExports; i++)
            entries[i] = GenerateEntry(masm, metadata_->funcExports[i]);

        if (!interpExits.resize(numFuncImports()))
            return false;
        if (!jitExits.resize(numFuncImports()))
            return false;
        for (uint32_t i = 0; i < numFuncImports(); i++) {
            interpExits[i] = GenerateImportInterpExit(masm, metadata_->funcImports[i], i, &throwLabel);
            jitExits[i] = GenerateImportJitExit(masm, metadata_->funcImports[i], &throwLabel);
        }

        for (Trap trap : MakeEnumeratedRange(Trap::Limit))
            trapExits[trap] = GenerateTrapExit(masm, trap, &throwLabel);

        outOfBoundsExit = GenerateOutOfBoundsExit(masm, &throwLabel);
        unalignedAccessExit = GenerateUnalignedExit(masm, &throwLabel);
        interruptExit = GenerateInterruptExit(masm, &throwLabel);
        throwStub = GenerateThrowStub(masm, &throwLabel);

        if (masm.oom() || !masm_.asmMergeWith(masm))
            return false;
    }

    // Adjust each of the resulting Offsets (to account for being merged into
    // masm_) and then create code ranges for all the stubs.

    for (uint32_t i = 0; i < numFuncExports; i++) {
        entries[i].offsetBy(offsetInWhole);
        metadata_->funcExports[i].initEntryOffset(entries[i].begin);
        if (!metadata_->codeRanges.emplaceBack(CodeRange::Entry, entries[i]))
            return false;
    }

    for (uint32_t i = 0; i < numFuncImports(); i++) {
        interpExits[i].offsetBy(offsetInWhole);
        metadata_->funcImports[i].initInterpExitOffset(interpExits[i].begin);
        if (!metadata_->codeRanges.emplaceBack(CodeRange::ImportInterpExit, interpExits[i]))
            return false;

        jitExits[i].offsetBy(offsetInWhole);
        metadata_->funcImports[i].initJitExitOffset(jitExits[i].begin);
        if (!metadata_->codeRanges.emplaceBack(CodeRange::ImportJitExit, jitExits[i]))
            return false;
    }

    for (Trap trap : MakeEnumeratedRange(Trap::Limit)) {
        trapExits[trap].offsetBy(offsetInWhole);
        if (!metadata_->codeRanges.emplaceBack(CodeRange::TrapExit, trapExits[trap]))
            return false;
    }

    outOfBoundsExit.offsetBy(offsetInWhole);
    if (!metadata_->codeRanges.emplaceBack(CodeRange::Inline, outOfBoundsExit))
        return false;

    unalignedAccessExit.offsetBy(offsetInWhole);
    if (!metadata_->codeRanges.emplaceBack(CodeRange::Inline, unalignedAccessExit))
        return false;

    interruptExit.offsetBy(offsetInWhole);
    if (!metadata_->codeRanges.emplaceBack(CodeRange::Inline, interruptExit))
        return false;

    throwStub.offsetBy(offsetInWhole);
    if (!metadata_->codeRanges.emplaceBack(CodeRange::Inline, throwStub))
        return false;

    // Fill in LinkData with the offsets of these stubs.

    linkData_.outOfBoundsOffset = outOfBoundsExit.begin;
    linkData_.interruptOffset = interruptExit.begin;

    // Now that all other code has been emitted, patch all remaining callsites.

    if (!patchCallSites(&trapExits))
        return false;

    // Now that all code has been generated, patch far jumps to destinations.

    for (CallThunk& callThunk : metadata_->callThunks) {
        uint32_t funcIndex = callThunk.u.funcIndex;
        callThunk.u.codeRangeIndex = funcToCodeRange_[funcIndex];
        CodeOffset farJump(callThunk.offset);
        masm_.patchFarJump(farJump, funcCodeRange(funcIndex).funcNonProfilingEntry());
    }

    for (const TrapFarJump& farJump : masm_.trapFarJumps())
        masm_.patchFarJump(farJump.jump, trapExits[farJump.trap].begin);

    // Code-generation is complete!

    masm_.finish();
    return !masm_.oom();
}