// Recover x ** 0.5 by re-evaluating Math.pow with a constant 0.5 exponent.
bool
RPowHalf::recover(JSContext *cx, SnapshotIterator &iter) const
{
    RootedValue base(cx, iter.read());
    MOZ_ASSERT(base.isNumber());

    RootedValue half(cx);
    half.setNumber(0.5);

    RootedValue result(cx);
    if (!js::math_pow_handle(cx, base, half, &result))
        return false;

    iter.storeInstructionResult(result);
    return true;
}
// Re-create the |this| object for a constructor call from its template object.
bool
RCreateThisWithTemplate::recover(JSContext* cx, SnapshotIterator& iter) const
{
    RootedObject templateObj(cx, &iter.read().toObject());

    // See CodeGenerator::visitCreateThisWithTemplate
    JSObject* thisObj = NewObjectOperationWithTemplate(cx, templateObj);
    if (!thisObj)
        return false;

    RootedValue recovered(cx);
    recovered.setObject(*thisObj);
    iter.storeInstructionResult(recovered);
    return true;
}
// Re-allocate an array of count_ elements, reusing the template's group.
bool
RNewArray::recover(JSContext* cx, SnapshotIterator& iter) const
{
    RootedObject templateObj(cx, &iter.read().toObject());
    RootedObjectGroup group(cx, templateObj->group());

    JSObject* arr = NewFullyAllocatedArrayTryUseGroup(cx, group, count_);
    if (!arr)
        return false;

    RootedValue recovered(cx);
    recovered.setObject(*arr);
    iter.storeInstructionResult(recovered);
    return true;
}
// Recover a signed right-shift (>>) from its two operands.
bool
RRsh::recover(JSContext *cx, SnapshotIterator &iter) const
{
    RootedValue lhs(cx, iter.read());
    RootedValue rhs(cx, iter.read());
    MOZ_ASSERT(!lhs.isObject());
    MOZ_ASSERT(!rhs.isObject());

    int32_t shifted;
    if (!js::BitRsh(cx, lhs, rhs, &shifted))
        return false;

    RootedValue recovered(cx, js::Int32Value(shifted));
    iter.storeInstructionResult(recovered);
    return true;
}
// Recover a numeric conversion; objects and symbols are never snapshot here.
bool
RToDouble::recover(JSContext* cx, SnapshotIterator& iter) const
{
    RootedValue input(cx, iter.read());
    MOZ_ASSERT(!input.isObject());
    MOZ_ASSERT(!input.isSymbol());

    double converted;
    if (!ToNumber(cx, input, &converted))
        return false;

    RootedValue recovered(cx);
    recovered.setDouble(converted);
    iter.storeInstructionResult(recovered);
    return true;
}
// Re-create |this| by cloning the tenured template object, honoring the
// heap choice recorded at compile time.
bool
RCreateThisWithTemplate::recover(JSContext *cx, SnapshotIterator &iter) const
{
    RootedNativeObject templateObj(cx, &iter.read().toObject().as<NativeObject>());

    // See CodeGenerator::visitCreateThisWithTemplate
    gc::AllocKind kind = templateObj->asTenured().getAllocKind();
    gc::InitialHeap heap = tenuredHeap_ ? gc::TenuredHeap : gc::DefaultHeap;
    JSObject *clone = NativeObject::copy(cx, kind, heap, templateObj);
    if (!clone)
        return false;

    RootedValue recovered(cx);
    recovered.setObject(*clone);
    iter.storeInstructionResult(recovered);
    return true;
}
// Recover Math.sqrt of the snapshotted operand.
bool
RSqrt::recover(JSContext *cx, SnapshotIterator &iter) const
{
    RootedValue input(cx, iter.read());
    MOZ_ASSERT(input.isNumber());

    RootedValue result(cx);
    if (!math_sqrt_handle(cx, input, &result))
        return false;

    // MIRType_Float32 is a specialization embedding the fact that the result is
    // rounded to a Float32.
    if (isFloatOperation_) {
        if (!RoundFloat32(cx, result, &result))
            return false;
    }

    iter.storeInstructionResult(result);
    return true;
}
// Recover a division by re-running the generic DivValues operation.
bool
RDiv::recover(JSContext *cx, SnapshotIterator &iter) const
{
    RootedValue dividend(cx, iter.read());
    RootedValue divisor(cx, iter.read());

    RootedValue result(cx);
    if (!js::DivValues(cx, &dividend, &divisor, &result))
        return false;

    // MIRType_Float32 is a specialization embedding the fact that the result is
    // rounded to a Float32.
    if (isFloatOperation_) {
        if (!RoundFloat32(cx, result, &result))
            return false;
    }

    iter.storeInstructionResult(result);
    return true;
}
// Recover an array allocation that was sunk by the optimizer: allocate a
// fresh dense array of count_ elements with the allocation behaviour
// recorded at compile time.
bool RNewArray::recover(JSContext *cx, SnapshotIterator &iter) const { RootedObject templateObject(cx, &iter.read().toObject()); RootedValue result(cx); RootedTypeObject type(cx); // See CodeGenerator::visitNewArrayCallVM
// Only propagate the template's type object for non-singleton templates;
// type stays null otherwise -- presumably NewDenseArray handles a null
// type by giving the array its own type; confirm against its definition.
if (!templateObject->hasSingletonType()) type = templateObject->type(); JSObject *resultObject = NewDenseArray(cx, count_, type, allocatingBehaviour_); if (!resultObject) return false; result.setObject(*resultObject); iter.storeInstructionResult(result); return true; }
// Recover Math.hypot by collecting all snapshotted operands into a vector
// and re-running the handle-based implementation.
bool
RHypot::recover(JSContext* cx, SnapshotIterator& iter) const
{
    JS::AutoValueVector operands(cx);
    if (!operands.reserve(numOperands_))
        return false;

    uint32_t remaining = numOperands_;
    while (remaining--)
        operands.infallibleAppend(iter.read());

    RootedValue recovered(cx);
    if (!js::math_hypot_handle(cx, operands, &recovered))
        return false;

    iter.storeInstructionResult(recovered);
    return true;
}
// Recover a sunk object allocation, distinguishing the class-prototype case
// from a plain init-object allocation.
bool
RNewObject::recover(JSContext *cx, SnapshotIterator &iter) const
{
    RootedNativeObject templateObj(cx, &iter.read().toObject().as<NativeObject>());

    // See CodeGenerator::visitNewObjectVMCall
    JSObject *obj = templateObjectIsClassPrototype_
                    ? NewInitObjectWithClassPrototype(cx, templateObj)
                    : NewInitObject(cx, templateObj);
    if (!obj)
        return false;

    RootedValue recovered(cx);
    recovered.setObject(*obj);
    iter.storeInstructionResult(recovered);
    return true;
}
// Close a live iterator held in a local slot of an Ion frame while unwinding
// for an exception, so the iterator protocol is not left half-open.
static void CloseLiveIterator(JSContext *cx, const InlineFrameIterator &frame, uint32 localSlot) { SnapshotIterator si = frame.snapshotIterator(); // Skip stack slots until we reach the iterator object.
// Snapshot layout: argument slots first, then the script's fixed slots.
// localSlot appears to be 1-based given the "- 1" below -- TODO confirm
// against the caller's slot encoding.
uint32 base = CountArgSlots(frame.maybeCallee()) + frame.script()->nfixed; uint32 skipSlots = base + localSlot - 1; for (unsigned i = 0; i < skipSlots; i++) si.skip(); Value v = si.read(); JSObject *obj = &v.toObject();
// Pick the unwind flavor based on whether a catchable exception is pending.
if (cx->isExceptionPending()) UnwindIteratorForException(cx, obj); else UnwindIteratorForUncatchableException(cx, obj); }
// Recover a sign-extension by re-running the byte- or halfword-sized
// SignExtendOperation on the snapshotted operand.
bool
RSignExtend::recover(JSContext* cx, SnapshotIterator& iter) const
{
    RootedValue operand(cx, iter.read());

    // Zero-initialize defensively: the switch below has no default case, so
    // if mode_ were ever corrupted, |result| would otherwise be read
    // uninitialized. Behavior for the two valid modes is unchanged.
    int32_t result = 0;
    switch (MSignExtend::Mode(mode_)) {
      case MSignExtend::Byte:
        if (!js::SignExtendOperation<int8_t>(cx, operand, &result))
            return false;
        break;
      case MSignExtend::Half:
        if (!js::SignExtendOperation<int16_t>(cx, operand, &result))
            return false;
        break;
    }

    RootedValue rootedResult(cx, js::Int32Value(result));
    iter.storeInstructionResult(rootedResult);
    return true;
}
// Re-create |this| by cloning the tenured template object. The analysis
// guard must be live before the allocation below.
bool
RCreateThisWithTemplate::recover(JSContext *cx, SnapshotIterator &iter) const
{
    RootedNativeObject templateObj(cx, &iter.read().toObject().as<NativeObject>());

    // Use AutoEnterAnalysis to avoid invoking the object metadata callback
    // while bailing out, which could try to walk the stack.
    types::AutoEnterAnalysis enter(cx);

    // See CodeGenerator::visitCreateThisWithTemplate
    gc::AllocKind kind = templateObj->asTenured().getAllocKind();
    gc::InitialHeap heap = tenuredHeap_ ? gc::TenuredHeap : gc::DefaultHeap;
    JSObject *clone = NativeObject::copy(cx, kind, heap, templateObj);
    if (!clone)
        return false;

    RootedValue recovered(cx);
    recovered.setObject(*clone);
    iter.storeInstructionResult(recovered);
    return true;
}
// Recover a sunk object allocation: object literals and Object.create each
// have a dedicated template-based allocation path.
bool
RNewObject::recover(JSContext* cx, SnapshotIterator& iter) const
{
    RootedObject templateObj(cx, &iter.read().toObject());

    // See CodeGenerator::visitNewObjectVMCall
    JSObject* obj;
    if (mode_ == MNewObject::ObjectLiteral) {
        obj = NewObjectOperationWithTemplate(cx, templateObj);
    } else {
        MOZ_ASSERT(mode_ == MNewObject::ObjectCreate);
        obj = ObjectCreateWithTemplate(cx, templateObj.as<PlainObject>());
    }
    if (!obj)
        return false;

    RootedValue recovered(cx);
    recovered.setObject(*obj);
    iter.storeInstructionResult(recovered);
    return true;
}
// Recover a sunk array allocation: allocate a fresh dense array of count_
// elements with the allocation behaviour recorded at compile time.
bool RNewArray::recover(JSContext *cx, SnapshotIterator &iter) const { RootedObject templateObject(cx, &iter.read().toObject()); RootedValue result(cx); RootedTypeObject type(cx); // Use AutoEnterAnalysis to avoid invoking the object metadata callback
// while bailing out, which could try to walk the stack.
// (It must be entered before the NewDenseArray allocation below.)
types::AutoEnterAnalysis enter(cx); // See CodeGenerator::visitNewArrayCallVM
// Only propagate the template's type object for non-singleton templates;
// type stays null otherwise -- presumably NewDenseArray tolerates a null
// type; confirm against its definition.
if (!templateObject->hasSingletonType()) type = templateObject->type(); JSObject *resultObject = NewDenseArray(cx, count_, type, allocatingBehaviour_); if (!resultObject) return false; result.setObject(*resultObject); iter.storeInstructionResult(result); return true; }
// Recover a sunk object allocation, distinguishing the class-prototype case
// from a plain init-object allocation.
bool RNewObject::recover(JSContext *cx, SnapshotIterator &iter) const { RootedObject templateObject(cx, &iter.read().toObject()); RootedValue result(cx); JSObject *resultObject = nullptr; // Use AutoEnterAnalysis to avoid invoking the object metadata callback
// while bailing out, which could try to walk the stack.
// (It must be entered before either allocation below.)
types::AutoEnterAnalysis enter(cx); // See CodeGenerator::visitNewObjectVMCall
if (templateObjectIsClassPrototype_) resultObject = NewInitObjectWithClassPrototype(cx, templateObject); else resultObject = NewInitObject(cx, templateObject); if (!resultObject) return false; result.setObject(*resultObject); iter.storeInstructionResult(result); return true; }
// Recover a multiplication. Normal mode re-runs generic MulValues (with
// optional Float32 rounding); Integer mode re-runs Math.imul.
bool
RMul::recover(JSContext* cx, SnapshotIterator& iter) const
{
    RootedValue lhs(cx, iter.read());
    RootedValue rhs(cx, iter.read());
    RootedValue result(cx);

    if (MMul::Mode(mode_) != MMul::Normal) {
        MOZ_ASSERT(MMul::Mode(mode_) == MMul::Integer);
        if (!js::math_imul_handle(cx, lhs, rhs, &result))
            return false;
    } else {
        if (!js::MulValues(cx, &lhs, &rhs, &result))
            return false;

        // MIRType_Float32 is a specialization embedding the fact that the
        // result is rounded to a Float32.
        if (isFloatOperation_ && !RoundFloat32(cx, result, &result))
            return false;
    }

    iter.storeInstructionResult(result);
    return true;
}
void StackFrame::initFromBailout(JSContext *cx, SnapshotIterator &iter) { AutoAssertNoGC nogc; uint32 exprStackSlots = iter.slots() - script()->nfixed; #ifdef TRACK_SNAPSHOTS iter.spewBailingFrom(); #endif IonSpew(IonSpew_Bailouts, " expr stack slots %u, is function frame %u", exprStackSlots, isFunctionFrame()); if (iter.bailoutKind() == Bailout_ArgumentCheck) { // Temporary hack -- skip the (unused) scopeChain, because it could be // bogus (we can fail before the scope chain slot is set). Strip the // hasScopeChain flag and we'll check this later to run prologue(). iter.skip(); flags_ &= ~StackFrame::HAS_SCOPECHAIN; } else { Value v = iter.read(); if (v.isObject()) { scopeChain_ = &v.toObject(); flags_ |= StackFrame::HAS_SCOPECHAIN; if (isFunctionFrame() && fun()->isHeavyweight()) flags_ |= StackFrame::HAS_CALL_OBJ; } else { JS_ASSERT(v.isUndefined()); } } // Assume that all new stack frames have had their entry flag set if // profiling has been turned on. This will be corrected if necessary // elsewhere. if (cx->runtime->spsProfiler.enabled()) setPushedSPSFrame(); if (isFunctionFrame()) { Value thisv = iter.read(); formals()[-1] = thisv; // The new |this| must have already been constructed prior to an Ion // constructor running. 
if (isConstructing()) JS_ASSERT(!thisv.isPrimitive()); JS_ASSERT(iter.slots() >= CountArgSlots(fun())); IonSpew(IonSpew_Bailouts, " frame slots %u, nargs %u, nfixed %u", iter.slots(), fun()->nargs, script()->nfixed); for (uint32 i = 0; i < fun()->nargs; i++) { Value arg = iter.read(); formals()[i] = arg; } } exprStackSlots -= CountArgSlots(maybeFun()); for (uint32 i = 0; i < script()->nfixed; i++) { Value slot = iter.read(); slots()[i] = slot; } IonSpew(IonSpew_Bailouts, " pushing %u expression stack slots", exprStackSlots); FrameRegs ®s = cx->regs(); for (uint32 i = 0; i < exprStackSlots; i++) { Value v; // If coming from an invalidation bailout, and this is the topmost // value, and a value override has been specified, don't read from the // iterator. Otherwise, we risk using a garbage value. if (!iter.moreFrames() && i == exprStackSlots - 1 && cx->runtime->hasIonReturnOverride()) v = iter.skip(); else v = iter.read(); *regs.sp++ = v; } unsigned pcOff = iter.pcOffset(); regs.pc = script()->code + pcOff; if (iter.resumeAfter()) regs.pc = GetNextPc(regs.pc); IonSpew(IonSpew_Bailouts, " new PC is offset %u within script %p (line %d)", pcOff, (void *)script(), PCToLineNumber(script(), regs.pc)); JS_ASSERT(exprStackSlots == js_ReconstructStackDepth(cx, script(), regs.pc)); }
void StackFrame::initFromBailout(JSContext *cx, SnapshotIterator &iter) { uint32_t exprStackSlots = iter.slots() - script()->nfixed; #ifdef TRACK_SNAPSHOTS iter.spewBailingFrom(); #endif IonSpew(IonSpew_Bailouts, " expr stack slots %u, is function frame %u", exprStackSlots, isFunctionFrame()); if (iter.bailoutKind() == Bailout_ArgumentCheck) { // Temporary hack -- skip the (unused) scopeChain, because it could be // bogus (we can fail before the scope chain slot is set). Strip the // hasScopeChain flag. If a call object is needed, it will get handled later // by |ThunkToInterpreter| which call |EnsureHasScopeObjects|. iter.skip(); flags_ &= ~StackFrame::HAS_SCOPECHAIN; // If the script binds arguments, then skip the snapshot slot reserved to hold // its value. if (script()->argumentsHasVarBinding()) iter.skip(); flags_ &= ~StackFrame::HAS_ARGS_OBJ; } else { Value scopeChain = iter.read(); JS_ASSERT(scopeChain.isObject() || scopeChain.isUndefined()); if (scopeChain.isObject()) { scopeChain_ = &scopeChain.toObject(); flags_ |= StackFrame::HAS_SCOPECHAIN; if (isFunctionFrame() && fun()->isHeavyweight()) flags_ |= StackFrame::HAS_CALL_OBJ; } // The second slot will be an arguments object if the script needs one. if (script()->argumentsHasVarBinding()) { Value argsObj = iter.read(); JS_ASSERT(argsObj.isObject() || argsObj.isUndefined()); if (argsObj.isObject()) initArgsObj(argsObj.toObject().asArguments()); } } // Assume that all new stack frames have had their entry flag set if // profiling has been turned on. This will be corrected if necessary // elsewhere. if (cx->runtime->spsProfiler.enabled()) setPushedSPSFrame(); if (isFunctionFrame()) { Value thisv = iter.read(); formals()[-1] = thisv; // The new |this| must have already been constructed prior to an Ion // constructor running. 
if (isConstructing()) JS_ASSERT(!thisv.isPrimitive()); JS_ASSERT(iter.slots() >= CountArgSlots(script(), fun())); IonSpew(IonSpew_Bailouts, " frame slots %u, nargs %u, nfixed %u", iter.slots(), fun()->nargs, script()->nfixed); for (uint32_t i = 0; i < fun()->nargs; i++) { Value arg = iter.read(); formals()[i] = arg; } } exprStackSlots -= CountArgSlots(script(), maybeFun()); for (uint32_t i = 0; i < script()->nfixed; i++) { Value slot = iter.read(); slots()[i] = slot; } IonSpew(IonSpew_Bailouts, " pushing %u expression stack slots", exprStackSlots); FrameRegs ®s = cx->regs(); for (uint32_t i = 0; i < exprStackSlots; i++) { Value v; // If coming from an invalidation bailout, and this is the topmost // value, and a value override has been specified, don't read from the // iterator. Otherwise, we risk using a garbage value. if (!iter.moreFrames() && i == exprStackSlots - 1 && cx->runtime->hasIonReturnOverride()) v = iter.skip(); else v = iter.read(); *regs.sp++ = v; } unsigned pcOff = iter.pcOffset(); regs.pc = script()->code + pcOff; if (iter.resumeAfter()) regs.pc = GetNextPc(regs.pc); IonSpew(IonSpew_Bailouts, " new PC is offset %u within script %p (line %d)", pcOff, (void *)script(), PCToLineNumber(script(), regs.pc)); // For fun.apply({}, arguments) the reconstructStackDepth will be atleast 4, // but it could be that we inlined the funapply. In that case exprStackSlots, // will have the real arguments in the slots and not always be equal. JS_ASSERT_IF(JSOp(*regs.pc) != JSOP_FUNAPPLY, exprStackSlots == js_ReconstructStackDepth(cx, script(), regs.pc)); }