FrameIterator::FrameIterator(const WasmActivation& activation)
  : activation_(&activation),
    code_(nullptr),
    callsite_(nullptr),
    codeRange_(nullptr),
    fp_(activation.fp()),
    pc_(nullptr),
    missingFrameMessage_(false)
{
    if (fp_) {
        settle();
        return;
    }

    void* pc = activation.resumePC();
    if (!pc) {
        MOZ_ASSERT(done());
        return;
    }
    pc_ = (uint8_t*)pc;

    code_ = activation_->compartment()->wasm.lookupCode(pc);
    MOZ_ASSERT(code_);

    const CodeRange* codeRange = code_->lookupRange(pc);
    MOZ_ASSERT(codeRange);

    if (codeRange->kind() == CodeRange::Function)
        codeRange_ = codeRange;
    else
        missingFrameMessage_ = true;

    MOZ_ASSERT(!done());
}
FrameIterator::FrameIterator(const WasmActivation& activation)
  : cx_(activation.cx()),
    instance_(&activation.instance()),
    callsite_(nullptr),
    codeRange_(nullptr),
    fp_(activation.fp()),
    missingFrameMessage_(false)
{
    if (fp_) {
        settle();
        return;
    }

    void* pc = activation.resumePC();
    if (!pc)
        return;

    const CodeRange* codeRange = instance_->lookupCodeRange(pc);
    MOZ_ASSERT(codeRange);

    if (codeRange->kind() == CodeRange::Function)
        codeRange_ = codeRange;
    else
        missingFrameMessage_ = true;

    MOZ_ASSERT(!done());
}
// Use an int32_t return type instead of bool since bool does not have a
// specified width and the caller is assuming a word-sized return.
static int32_t
InvokeImport_Void(int32_t importIndex, int32_t argc, Value* argv)
{
    WasmActivation* activation = JSRuntime::innermostWasmActivation();
    JSContext* cx = activation->cx();

    RootedValue rval(cx);
    return activation->module().callImport(cx, importIndex, argc, argv, &rval);
}
void
ProfilingFrameIterator::initFromFP(const WasmActivation& activation)
{
    uint8_t* fp = activation.fp();

    // If a signal was handled while entering an activation, the frame will
    // still be null.
    if (!fp) {
        MOZ_ASSERT(done());
        return;
    }

    // Since we don't have the pc for fp, start unwinding at the caller of fp
    // (ReturnAddressFromFP(fp)). This means that the innermost frame is
    // skipped. This is fine because:
    //  - for import exit calls, the innermost frame is a thunk, so the first
    //    frame that shows up is the function calling the import;
    //  - for Math and other builtin calls as well as interrupts, we note the
    //    absence of an exit reason and inject a fake "builtin" frame; and
    //  - for async interrupts, we just accept that we'll lose the innermost
    //    frame.
    void* pc = ReturnAddressFromFP(fp);

    const CodeRange* codeRange = instance_->lookupCodeRange(pc);
    MOZ_ASSERT(codeRange);
    codeRange_ = codeRange;
    stackAddress_ = fp;

    switch (codeRange->kind()) {
      case CodeRange::Entry:
        callerPC_ = nullptr;
        callerFP_ = nullptr;
        break;
      case CodeRange::Function:
        fp = CallerFPFromFP(fp);
        callerPC_ = ReturnAddressFromFP(fp);
        callerFP_ = CallerFPFromFP(fp);
        AssertMatchesCallSite(*instance_, callerPC_, callerFP_, fp);
        break;
      case CodeRange::ImportJitExit:
      case CodeRange::ImportInterpExit:
      case CodeRange::Inline:
      case CodeRange::CallThunk:
        MOZ_CRASH("Unexpected CodeRange kind");
    }

    // The iterator inserts a pretend innermost frame for non-None ExitReasons.
    // This allows the variety of exit reasons to show up in the callstack.
    exitReason_ = activation.exitReason();

    // In the case of calls to builtins or asynchronous interrupts, no exit path
    // is taken so the exitReason is None. Coerce these to the Native exit
    // reason so that self-time is accounted for.
    if (exitReason_ == ExitReason::None)
        exitReason_ = ExitReason::Native;

    MOZ_ASSERT(!done());
}
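// The unwinding above depends on the fixed layout of the frame that the
// profiling prologue pushes, accessed via ReturnAddressFromFP/CallerFPFromFP.
// Below is a minimal sketch of that assumed layout and the accessors; the
// struct name and field order are illustrative assumptions, not a verbatim
// copy of the real definitions used elsewhere in this file.
struct AsmJSFrame
{
    // The caller's saved frame pointer, pushed by the profiling prologue.
    uint8_t* callerFP;

    // The return address pushed by the call (on ARM/MIPS it is pushed
    // explicitly by the first instruction of the prologue).
    void* returnAddress;
};

static uint8_t*
CallerFPFromFP(void* fp)
{
    // Sketch: fp points at the innermost AsmJSFrame.
    return reinterpret_cast<AsmJSFrame*>(fp)->callerFP;
}

static void*
ReturnAddressFromFP(void* fp)
{
    // Sketch: the pc to resume unwinding at in the caller.
    return reinterpret_cast<AsmJSFrame*>(fp)->returnAddress;
}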
FrameIterator::FrameIterator(const WasmActivation& activation)
  : cx_(activation.cx()),
    module_(&activation.module()),
    callsite_(nullptr),
    codeRange_(nullptr),
    fp_(activation.fp())
{
    if (fp_)
        settle();
}
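// Each FrameIterator constructor variant above leaves the iterator either
// done() or positioned on the innermost wasm frame. A minimal usage sketch
// follows; the member names used here (done(), operator++, lineOrBytecode())
// are assumed from the iterator's typical interface and may differ in this
// revision.
static void
DumpWasmBacktrace(const WasmActivation& activation)
{
    for (FrameIterator iter(activation); !iter.done(); ++iter) {
        // Each frame exposes enough information to identify the call site.
        fprintf(stderr, "wasm frame at bytecode offset %u\n", iter.lineOrBytecode());
    }
}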
static int32_t
InvokeImport_I64(int32_t importIndex, int32_t argc, uint64_t* argv)
{
    WasmActivation* activation = JSRuntime::innermostWasmActivation();
    JSContext* cx = activation->cx();

    RootedValue rval(cx);
    if (!activation->module().callImport(cx, importIndex, argc, argv, &rval))
        return false;

    if (!ReadI64Object(cx, rval, (int64_t*)argv))
        return false;

    return true;
}
// Use an int32_t return type instead of bool since bool does not have a
// specified width and the caller is assuming a word-sized return.
static int32_t
InvokeImport_F64(int32_t importIndex, int32_t argc, Value* argv)
{
    WasmActivation* activation = JSRuntime::innermostWasmActivation();
    JSContext* cx = activation->cx();

    RootedValue rval(cx);
    if (!activation->module().callImport(cx, importIndex, argc, argv, &rval))
        return false;

    double dbl;
    if (!ToNumber(cx, rval, &dbl))
        return false;

    argv[0] = DoubleValue(dbl);
    return true;
}
// Use an int32_t return type instead of bool since bool does not have a
// specified width and the caller is assuming a word-sized return.
static int32_t
InvokeImport_I32(int32_t importIndex, int32_t argc, Value* argv)
{
    WasmActivation* activation = JSRuntime::innermostWasmActivation();
    JSContext* cx = activation->cx();

    RootedValue rval(cx);
    if (!activation->module().callImport(cx, importIndex, argc, argv, &rval))
        return false;

    int32_t i32;
    if (!ToInt32(cx, rval, &i32))
        return false;

    argv[0] = Int32Value(i32);
    return true;
}
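// The InvokeImport_* helpers above differ only in how they convert the
// import's JS return value back into a wasm value written through argv. A
// hypothetical dispatcher choosing among them by the import's return type
// might look like the sketch below; SelectInvokeImport and its ExprType cases
// are illustrative assumptions, not the actual exit-stub generation code.
static void*
SelectInvokeImport(ExprType ret)
{
    switch (ret) {
      case ExprType::Void: return reinterpret_cast<void*>(&InvokeImport_Void);
      case ExprType::I32:  return reinterpret_cast<void*>(&InvokeImport_I32);
      case ExprType::I64:  return reinterpret_cast<void*>(&InvokeImport_I64);
      case ExprType::F64:  return reinterpret_cast<void*>(&InvokeImport_F64);
      default:             MOZ_CRASH("unhandled import return type");
    }
}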
ProfilingFrameIterator::ProfilingFrameIterator(const WasmActivation& activation)
  : instance_(&activation.instance()),
    codeRange_(nullptr),
    callerFP_(nullptr),
    callerPC_(nullptr),
    stackAddress_(nullptr),
    exitReason_(ExitReason::None)
{
    // If profiling hasn't been enabled for this instance, then CallerFPFromFP
    // will be trash, so ignore the entire activation. In practice, this only
    // happens if profiling is enabled while the instance is on the stack (in
    // which case profiling will be enabled when the instance becomes inactive
    // and gets called again).
    if (!instance_->profilingEnabled()) {
        MOZ_ASSERT(done());
        return;
    }

    initFromFP(activation);
}
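// A minimal sketch of how a sampling profiler might consume this iterator.
// The done(), operator++, and label() members are assumed from the
// profiling-iterator's usual interface; the exact names may differ in this
// revision.
static void
SampleWasmStack(const WasmActivation& activation)
{
    for (ProfilingFrameIterator iter(activation); !iter.done(); ++iter) {
        // label() is assumed to return a static string describing the frame
        // (function, entry trampoline, exit stub, ...) for a profile bucket.
        fprintf(stderr, "  %s\n", iter.label());
    }
}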
static inline void
AssertMatchesCallSite(const WasmActivation& activation, void* callerPC, void* callerFP, void* fp)
{
#ifdef DEBUG
    Code* code = activation.compartment()->wasm.lookupCode(callerPC);
    MOZ_ASSERT(code);

    const CodeRange* callerCodeRange = code->lookupRange(callerPC);
    MOZ_ASSERT(callerCodeRange);

    if (callerCodeRange->kind() == CodeRange::Entry) {
        MOZ_ASSERT(callerFP == nullptr);
        return;
    }

    const CallSite* callsite = code->lookupCallSite(callerPC);
    MOZ_ASSERT(callsite);

    MOZ_ASSERT(callerFP == (uint8_t*)fp + callsite->stackDepth());
#endif
}
ProfilingFrameIterator::ProfilingFrameIterator(const WasmActivation& activation,
                                               const RegisterState& state)
  : activation_(&activation),
    code_(nullptr),
    codeRange_(nullptr),
    callerFP_(nullptr),
    callerPC_(nullptr),
    stackAddress_(nullptr),
    exitReason_(ExitReason::None)
{
    // If profiling hasn't been enabled for this instance, then CallerFPFromFP
    // will be trash, so ignore the entire activation. In practice, this only
    // happens if profiling is enabled while the instance is on the stack (in
    // which case profiling will be enabled when the instance becomes inactive
    // and gets called again).
    if (!activation_->compartment()->wasm.profilingEnabled()) {
        MOZ_ASSERT(done());
        return;
    }

    // If pc isn't in the instance's code, we must have exited the code via an
    // exit trampoline or signal handler.
    code_ = activation_->compartment()->wasm.lookupCode(state.pc);
    if (!code_) {
        initFromFP();
        return;
    }

    // Note: fp may be null while entering and leaving the activation.
    uint8_t* fp = activation.fp();

    const CodeRange* codeRange = code_->lookupRange(state.pc);
    switch (codeRange->kind()) {
      case CodeRange::Function:
      case CodeRange::FarJumpIsland:
      case CodeRange::ImportJitExit:
      case CodeRange::ImportInterpExit:
      case CodeRange::TrapExit: {
        // When the pc is inside the prologue/epilogue, the innermost call's
        // AsmJSFrame is not complete and thus fp points to the
        // second-to-innermost call's AsmJSFrame. Since fp can only tell you
        // about its caller (via ReturnAddressFromFP(fp)), naively unwinding
        // while pc is in the prologue/epilogue would skip the second-to-
        // innermost call. To avoid this problem, we use the static structure
        // of the code in the prologue and epilogue to do the Right Thing.
        uint32_t offsetInModule = (uint8_t*)state.pc - code_->segment().base();
        MOZ_ASSERT(offsetInModule >= codeRange->begin());
        MOZ_ASSERT(offsetInModule < codeRange->end());
        uint32_t offsetInCodeRange = offsetInModule - codeRange->begin();
        void** sp = (void**)state.sp;
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
        if (offsetInCodeRange < PushedRetAddr || InThunk(*codeRange, offsetInModule)) {
            // First instruction of the ARM/MIPS function; the return address
            // is still in lr and fp still holds the caller's fp.
            callerPC_ = state.lr;
            callerFP_ = fp;
            AssertMatchesCallSite(*activation_, callerPC_, callerFP_, sp - 2);
        } else if (offsetInModule == codeRange->profilingReturn() - PostStorePrePopFP) {
            // Second-to-last instruction of the ARM/MIPS function; fp points
            // to the caller's fp; have not yet popped AsmJSFrame.
            callerPC_ = ReturnAddressFromFP(sp);
            callerFP_ = CallerFPFromFP(sp);
            AssertMatchesCallSite(*activation_, callerPC_, callerFP_, sp);
        } else
#endif
        if (offsetInCodeRange < PushedFP || offsetInModule == codeRange->profilingReturn() ||
            InThunk(*codeRange, offsetInModule))
        {
            // The return address has been pushed on the stack but not fp; fp
            // still points to the caller's fp.
            callerPC_ = *sp;
            callerFP_ = fp;
            AssertMatchesCallSite(*activation_, callerPC_, callerFP_, sp - 1);
        } else if (offsetInCodeRange < StoredFP) {
            // The full AsmJSFrame has been pushed; fp still points to the
            // caller's frame.
            MOZ_ASSERT(fp == CallerFPFromFP(sp));
            callerPC_ = ReturnAddressFromFP(sp);
            callerFP_ = CallerFPFromFP(sp);
            AssertMatchesCallSite(*activation_, callerPC_, callerFP_, sp);
        } else {
            // Not in the prologue/epilogue.
            callerPC_ = ReturnAddressFromFP(fp);
            callerFP_ = CallerFPFromFP(fp);
            AssertMatchesCallSite(*activation_, callerPC_, callerFP_, fp);
        }
        break;
      }
      case CodeRange::Entry: {
        // The entry trampoline is the final frame in a WasmActivation. The
        // entry trampoline also doesn't GeneratePrologue/Epilogue so we can't
        // use the general unwinding logic above.
        MOZ_ASSERT(!fp);
        callerPC_ = nullptr;
        callerFP_ = nullptr;
        break;
      }
      case CodeRange::Inline: {
        // The throw stub clears WasmActivation::fp on its way out.
        if (!fp) {
            MOZ_ASSERT(done());
            return;
        }

        // Most inline code stubs execute after the prologue/epilogue have
        // completed so we can simply unwind based on fp. The only exception is
        // the async interrupt stub, since it can be executed at any time.
        // However, the async interrupt is super rare, so we can tolerate
        // skipped frames. Thus, we simply unwind based on fp.
        callerPC_ = ReturnAddressFromFP(fp);
        callerFP_ = CallerFPFromFP(fp);
        AssertMatchesCallSite(*activation_, callerPC_, callerFP_, fp);
        break;
      }
    }

    codeRange_ = codeRange;
    stackAddress_ = state.sp;
    MOZ_ASSERT(!done());
}
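// The offset constants used above (PushedRetAddr, PushedFP, StoredFP and, on
// ARM/MIPS, PostStorePrePopFP) name points within the profiling prologue and
// epilogue. A rough sketch of the prologue windows they describe, under the
// assumption that "fp" is the activation's saved frame pointer and that the
// concrete byte offsets are measured when the prologue is generated:
//
//   <return address pushed>     ; by the call itself on x86, or by the first
//                               ; prologue instruction on ARM/MIPS (push lr)
//                               ; ---- PushedRetAddr ----
//   <push caller's fp>          ; the activation's current fp is pushed,
//                               ; completing the AsmJSFrame on the stack
//                               ; ---- PushedFP ----
//   <store sp as the new fp>    ; the activation's fp now points at the
//                               ; just-pushed AsmJSFrame
//                               ; ---- StoredFP ----
//
// The offsetInCodeRange comparisons above distinguish exactly these windows:
// before PushedRetAddr, fp is still the caller's and the return address is
// only in lr; before PushedFP, the return address is on the stack but fp is
// still the caller's; before StoredFP, the frame is fully pushed but fp has
// not been updated yet.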