/*
 * This function must only be called after the early prologue, since it depends
 * on fp->exec.fun.
 *
 * Rebuilds the newly-pushed callee frame when the actual argument count does
 * not match the callee's formal count: the partially-initialized frame is
 * popped and a properly-sized fixup frame is pushed in its place.
 * Returns the new frame, or throws (returns NULL) on over-recursion.
 */
void * JS_FASTCALL
stubs::FixupArity(VMFrame &f, uint32_t nactual)
{
    JSContext *cx = f.cx;
    StackFrame *oldfp = f.fp();

    /* Only reached on an actual/formal argument-count mismatch. */
    JS_ASSERT(nactual != oldfp->numFormalArgs());

    /*
     * Grossssss! *move* the stack frame. If this ends up being perf-critical,
     * we can figure out how to spot-optimize it. Be careful to touch only the
     * members that have been initialized by the caller and early prologue.
     */
    InitialFrameFlags initial = oldfp->initialFlags();
    JSFunction *fun = oldfp->fun();
    JSScript *script = fun->script();
    void *ncode = oldfp->nativeReturnAddress();

    /* Pop the inline frame. */
    f.regs.popPartialFrame((Value *)oldfp);

    /* Reserve enough space for a callee frame. */
    CallArgs args = CallArgsFromSp(nactual, f.regs.sp);
    StackFrame *fp = cx->stack.getFixupFrame(cx, DONT_REPORT_ERROR, args, fun,
                                             script, ncode, initial, &f.stackLimit);

    if (!fp) {
        /*
         * getFixupFrame was told not to report, so report over-recursion here,
         * after making regs coherent with ncode for exception handling.
         */
        f.regs.updateForNcode(f.jit(), ncode);
        js_ReportOverRecursed(cx);
        THROWV(NULL);
    }

    /* The caller takes care of assigning fp to regs. */
    return fp;
}
/*
 * Slow-path stub for a cached global-name load. If the property turns out to
 * be a plain data slot, patch the IC's inline fast path (shape guard + value
 * load) so future executions read the slot directly.
 */
void JS_FASTCALL
ic::GetGlobalName(VMFrame &f, ic::GetGlobalNameIC *ic)
{
    JSObject *global = f.fp()->scopeChain().getGlobal();
    JSAtom *name = f.script()->getAtom(GET_INDEX(f.pc()));
    jsid propid = ATOM_TO_JSID(name);

    const Shape *prop = global->nativeLookup(f.cx, propid);
    bool cacheable = prop && prop->hasDefaultGetterOrIsMethod() && prop->hasSlot();
    if (!cacheable) {
        /*
         * A property that exists but is not cacheable never will be, so
         * disable the IC; a missing property may still appear later, so
         * leave the IC alone in that case.
         */
        if (prop)
            PatchGetFallback(f, ic);
        stubs::GetGlobalName(f);
        return;
    }
    uint32 propSlot = prop->slot;

    Repatcher patcher(f.jit());

    /* Rewrite the inline shape guard to accept the global's current shape. */
    patcher.repatch(ic->fastPathStart.dataLabel32AtOffset(ic->shapeOffset), global->shape());

    /* Point the inline value load at the property's dynamic slot. */
    uint32 dynSlot = global->dynamicSlotIndex(propSlot);
    JSC::CodeLocationLabel loadLabel = ic->fastPathStart.labelAtOffset(ic->loadStoreOffset);
    patcher.patchAddressOffsetForValueLoad(loadLabel, dynSlot * sizeof(Value));

    /* The patched fast path only runs next time; do this load the slow way. */
    stubs::GetGlobalName(f);
}
/*
 * Disable a get-global IC: relink its slow-path call site to the generic
 * (non-IC) GetGlobalName stub so the cache is bypassed from now on.
 */
static void
PatchGetFallback(VMFrame &f, ic::GetGlobalNameIC *ic)
{
    JSC::FunctionPtr target(JS_FUNC_TO_DATA_PTR(void *, stubs::GetGlobalName));
    Repatcher repatcher(f.jit());
    repatcher.relink(ic->slowPathCall, target);
}
/*
 * Try to (re)enable the inline cache for a global-name store after a slow
 * path run. Returns Lookup_Cacheable when the inline path was patched,
 * Lookup_Uncacheable when the IC was left alone or disabled, or an error
 * status propagated from stub (re)generation.
 */
static LookupStatus
UpdateSetGlobalName(VMFrame &f, ic::SetGlobalNameIC *ic, JSObject *obj, const Shape *shape)
{
    /* Give globals a chance to appear. */
    if (!shape)
        return Lookup_Uncacheable;

    bool badShape = shape->isMethod() ||
                    !shape->hasDefaultSetter() ||
                    !shape->writable() ||
                    !shape->hasSlot();
    if (badShape || obj->watched()) {
        /* Disable the IC for weird shape attributes and watchpoints. */
        PatchSetFallback(f, ic);
        return Lookup_Uncacheable;
    }

    if (!obj->branded()) {
        /* Object is not branded, so we can use the inline path. */
        Repatcher repatcher(f.jit());
        ic->patchInlineShapeGuard(repatcher, obj->shape());

        uint32 dynSlot = obj->dynamicSlotIndex(shape->slot);
        JSC::CodeLocationLabel storeLabel = ic->fastPathStart.labelAtOffset(ic->loadStoreOffset);
        repatcher.patchAddressOffsetForValueStore(storeLabel, dynSlot * sizeof(Value),
                                                  ic->vr.isTypeKnown());

        return Lookup_Cacheable;
    }

    /* Branded sets must guard that they don't overwrite method-valued properties. */

    /*
     * If this slot has a function valued property, the tail of this opcode
     * could change the shape. Even if it doesn't, the IC is probably
     * pointless, because it will always hit the function-test path and
     * bail out. In these cases, don't bother building or updating the IC.
     */
    const Value &curValue = obj->getSlot(shape->slot);
    if (curValue.isObject() && curValue.toObject().isFunction()) {
        /*
         * If we're going to rebrand, the object may unbrand, allowing this
         * IC to come back to life. In that case, we don't disable the IC.
         */
        if (!ChangesMethodValue(curValue, f.regs.sp[-1]))
            PatchSetFallback(f, ic);
        return Lookup_Uncacheable;
    }

    /* Refresh the existing out-of-line stub, or build a fresh one. */
    if (ic->hasExtraStub)
        return UpdateSetGlobalNameStub(f, ic, obj, shape);
    return AttachSetGlobalNameStub(f, ic, obj, shape);
}
/*
 * Disable a set-global IC: relink its slow-path call to the generic
 * (non-IC) SetGlobal stub so the inline cache is bypassed from now on.
 */
static void
PatchSetFallback(VMFrame &f, ic::SetGlobalNameIC *ic)
{
    /*
     * NOTE(review): 'script' looks unused, but the STRICT_VARIANT macro below
     * presumably expands to an expression that reads it to select the strict-
     * or non-strict-mode stub variant — confirm against the macro definition
     * before removing this local.
     */
    JSScript *script = f.script();
    Repatcher repatch(f.jit());
    /* Pick the property-cache or no-cache flavor of the disabled stub. */
    VoidStubSetGlobal stub = ic->usePropertyCache
                             ? STRICT_VARIANT(DisabledSetGlobal)
                             : STRICT_VARIANT(DisabledSetGlobalNoCache);
    JSC::FunctionPtr fptr(JS_FUNC_TO_DATA_PTR(void *, stub));
    repatch.relink(ic->slowPathCall, fptr);
}
/*
 * This function must only be called after the early prologue, since it depends
 * on fp->exec.fun.
 *
 * Older StackSpace-API variant: on an argument-count mismatch, pop the
 * partially-built inline frame and push a correctly-sized replacement,
 * re-running the caller-half and early-prologue initialization on it.
 * Returns the new frame, or throws (returns NULL) on failure.
 */
void * JS_FASTCALL
stubs::FixupArity(VMFrame &f, uint32 nactual)
{
    JSContext *cx = f.cx;
    JSStackFrame *oldfp = f.fp();

    /* Only reached on an actual/formal argument-count mismatch. */
    JS_ASSERT(nactual != oldfp->numFormalArgs());

    /*
     * Grossssss! *move* the stack frame. If this ends up being perf-critical,
     * we can figure out how to spot-optimize it. Be careful to touch only the
     * members that have been initialized by initCallFrameCallerHalf and the
     * early prologue.
     */
    uint32 flags = oldfp->isConstructingFlag();
    JSFunction *fun = oldfp->fun();
    void *ncode = oldfp->nativeReturnAddress();

    /* Pop the inline frame: rewind fp and sp to where the frame began. */
    f.fp() = oldfp->prev();
    f.regs.sp = (Value*) oldfp;

    /* Reserve enough space for a callee frame. */
    JSStackFrame *newfp = cx->stack().getInlineFrameWithinLimit(cx, (Value*) oldfp, nactual,
                                                                fun, fun->script(), &flags,
                                                                f.entryfp, &f.stackLimit);
    if (!newfp) {
        /*
         * The PC is not coherent with the current frame, so fix it up for
         * exception handling.
         */
        f.regs.pc = f.jit()->nativeToPC(ncode);
        THROWV(NULL);
    }

    /* Reset the part of the stack frame set by the caller. */
    newfp->initCallFrameCallerHalf(cx, flags, ncode);

    /* Reset the part of the stack frame set by the prologue up to now. */
    newfp->initCallFrameEarlyPrologue(fun, nactual);

    /* The caller takes care of assigning fp to regs. */
    return newfp;
}
/*
 * This function must only be called after the early prologue, since it depends
 * on fp->exec.fun.
 *
 * MaybeConstruct variant: on an argument-count mismatch, pop the partially-
 * initialized inline frame and push a correctly-sized fixup frame in its
 * place. Returns the new frame, or throws (returns NULL) on over-recursion.
 */
void * JS_FASTCALL
stubs::FixupArity(VMFrame &f, uint32 nactual)
{
    JSContext *cx = f.cx;
    StackFrame *oldfp = f.fp();

    /* Only reached on an actual/formal argument-count mismatch. */
    JS_ASSERT(nactual != oldfp->numFormalArgs());

    /*
     * Grossssss! *move* the stack frame. If this ends up being perf-critical,
     * we can figure out how to spot-optimize it. Be careful to touch only the
     * members that have been initialized by initJitFrameCallerHalf and the
     * early prologue.
     */
    MaybeConstruct construct = oldfp->isConstructing();
    JSFunction *fun = oldfp->fun();
    JSScript *script = fun->script();
    void *ncode = oldfp->nativeReturnAddress();

    /* Pop the inline frame. */
    f.regs.popPartialFrame((Value *)oldfp);

    /* Reserve enough space for a callee frame. */
    CallArgs args = CallArgsFromSp(nactual, f.regs.sp);
    StackFrame *fp = cx->stack.getFixupFrame(cx, DONT_REPORT_ERROR, args, fun,
                                             script, ncode, construct, &f.stackLimit);

    if (!fp) {
        /*
         * The PC is not coherent with the current frame, so fix it up for
         * exception handling.
         */
        f.regs.pc = f.jit()->nativeToPC(ncode);
        /* getFixupFrame was told not to report, so do it here. */
        js_ReportOverRecursed(cx);
        THROWV(NULL);
    }

    /* The caller takes care of assigning fp to regs. */
    return fp;
}
/*
 * Invoke a scripted callee from JIT code without a call IC. Pushes the callee
 * frame and either (a) hands back the callee's JIT entry point in *pret for
 * the caller to jump to, or (b) runs the callee in the interpreter to
 * completion and sets *pret to NULL. Returns false on error. *unjittable is
 * set when compilation of the callee was aborted, so callers can stop trying.
 */
static inline bool
UncachedInlineCall(VMFrame &f, InitialFrameFlags initial,
                   void **pret, bool *unjittable, uint32_t argc)
{
    AssertCanGC();
    JSContext *cx = f.cx;
    CallArgs args = CallArgsFromSp(argc, f.regs.sp);
    RootedFunction newfun(cx, args.callee().toFunction());

    RootedScript newscript(cx, newfun->getOrCreateScript(cx));
    if (!newscript)
        return false;

    bool construct = InitialFrameFlagsAreConstructing(initial);

    /* Constructing calls may need a fresh type object for 'this' (see below). */
    RootedScript fscript(cx, f.script());
    bool newType = construct && cx->typeInferenceEnabled() &&
        types::UseNewType(cx, fscript, f.pc());

    if (!types::TypeMonitorCall(cx, args, construct))
        return false;

    /* Try to compile if not already compiled. */
    if (ShouldJaegerCompileCallee(cx, f.script(), newscript, f.jit())) {
        CompileStatus status = CanMethodJIT(cx, newscript, newscript->code, construct,
                                            CompileRequest_JIT, f.fp());
        if (status == Compile_Error) {
            /* A runtime exception was thrown, get out. */
            return false;
        }
        if (status == Compile_Abort)
            *unjittable = true;
    }

    /*
     * Make sure we are not calling from an inline frame if we need to make a
     * call object for the callee, as doing so could trigger GC and cause
     * jitcode discarding / frame expansion.
     */
    if (f.regs.inlined() && newfun->isHeavyweight()) {
        ExpandInlineFrames(cx->compartment);
        JS_ASSERT(!f.regs.inlined());
    }

    /*
     * Preserve f.regs.fp while pushing the new frame, for the invariant that
     * f.regs reflects the state when we entered the stub call. This handoff is
     * tricky: we need to make sure that f.regs is not updated to the new
     * frame, and we also need to ensure that cx->regs still points to f.regs
     * when space is reserved, in case doing so throws an exception.
     */
    FrameRegs regs = f.regs;

    /* Get pointer to new frame/slots, prepare arguments. */
    if (!cx->stack.pushInlineFrame(cx, regs, args, *newfun, newscript, initial, &f.stackLimit))
        return false;

    /* Finish the handoff to the new frame regs. */
    PreserveRegsGuard regsGuard(cx, regs);

    /*
     * If newscript was successfully compiled, run it. Skip for calls which
     * will be constructing a new type object for 'this'.
     */
    if (!newType) {
        if (JITScript *jit = newscript->getJIT(regs.fp()->isConstructing(),
                                               cx->compartment->compileBarriers())) {
            if (jit->invokeEntry) {
                *pret = jit->invokeEntry;

                /* Restore the old fp around and let the JIT code repush the new fp. */
                regs.popFrame((Value *) regs.fp());
                return true;
            }
        }
    }

    /*
     * Otherwise, run newscript in the interpreter. Expand any inlined frame we
     * are calling from, as the new frame is not associated with the VMFrame
     * and will not have its prevpc info updated if frame expansion is
     * triggered while interpreting.
     */
    if (f.regs.inlined()) {
        ExpandInlineFrames(cx->compartment);
        JS_ASSERT(!f.regs.inlined());
        regs.fp()->resetInlinePrev(f.fp(), f.regs.pc);
    }

    JS_CHECK_RECURSION(cx, return false);

    RootedScript script(cx, newscript);
    bool ok = RunScript(cx, script, cx->fp());
    /* Pop the callee frame whether or not the script succeeded. */
    f.cx->stack.popInlineFrame(regs);

    if (ok) {
        /* Feed the interpreter-produced return value to type inference. */
        RootedScript fscript(cx, f.script());
        types::TypeScript::Monitor(f.cx, fscript, f.pc(), args.rval());
    }

    /* NULL entry point tells the caller the call already completed. */
    *pret = NULL;
    return ok;
}
/*
 * Emit an out-of-line stub for a set of a branded global's property. The stub
 * re-checks the object's shape, refuses to overwrite a function-valued slot
 * (falling back to the slow path), and otherwise stores the value directly
 * into the property's dynamic slot. On success the IC's inline shape-guard
 * jump is relinked to the new stub and its patch offsets are recorded so the
 * stub can be updated later.
 */
static LookupStatus
AttachSetGlobalNameStub(VMFrame &f, ic::SetGlobalNameIC *ic, JSObject *obj, const Shape *shape)
{
    Assembler masm;

    Label start = masm.label();
    DataLabel32 shapeLabel;
    Jump guard = masm.branch32WithPatch(Assembler::NotEqual, ic->shapeReg, Imm32(obj->shape()),
                                        shapeLabel);

    /* A constant object needs rematerialization. */
    if (ic->objConst)
        masm.move(ImmPtr(obj), ic->objReg);

    JS_ASSERT(obj->branded());

    /*
     * Load obj->slots. If ic->objConst, then this clobbers objReg, because
     * ic->objReg == ic->shapeReg.
     */
    JS_ASSERT(!obj->isFixedSlot(shape->slot));
    masm.loadPtr(Address(ic->objReg, JSObject::offsetOfSlots()), ic->shapeReg);

    /* Test if overwriting a function-tagged slot. */
    Address slot(ic->shapeReg, sizeof(Value) * obj->dynamicSlotIndex(shape->slot));
    Jump isNotObject = masm.testObject(Assembler::NotEqual, slot);

    /* Now, test if the object is a function object. */
    masm.loadPayload(slot, ic->shapeReg);
    Jump isFun = masm.testFunction(Assembler::Equal, ic->shapeReg);

    /* Restore shapeReg to obj->slots, since we clobbered it. */
    if (ic->objConst)
        masm.move(ImmPtr(obj), ic->objReg);
    masm.loadPtr(Address(ic->objReg, JSObject::offsetOfSlots()), ic->shapeReg);

    /* If the object test fails, shapeReg is still obj->slots. */
    isNotObject.linkTo(masm.label(), &masm);
    DataLabel32 store = masm.storeValueWithAddressOffsetPatch(ic->vr, slot);

    Jump done = masm.jump();

    /* Link the generated code into an executable pool owned by the JITScript. */
    JITScript *jit = f.jit();
    LinkerHelper linker(masm, JSC::METHOD_CODE);
    JSC::ExecutablePool *ep = linker.init(f.cx);
    if (!ep)
        return Lookup_Error;
    if (!jit->execPools.append(ep)) {
        /* Pool could not be tracked; release it so it is not leaked. */
        ep->release();
        js_ReportOutOfMemory(f.cx);
        return Lookup_Error;
    }

    if (!linker.verifyRange(jit))
        return Lookup_Uncacheable;

    /* Success exits rejoin the fast path; guard failures take the slow path. */
    linker.link(done, ic->fastPathStart.labelAtOffset(ic->fastRejoinOffset));
    linker.link(guard, ic->slowPathStart);
    linker.link(isFun, ic->slowPathStart);

    JSC::CodeLocationLabel cs = linker.finalize(f);
    JaegerSpew(JSpew_PICs, "generated setgname stub at %p\n", cs.executableAddress());

    /* Divert the inline shape-guard jump into the new stub. */
    Repatcher repatcher(f.jit());
    repatcher.relink(ic->fastPathStart.jumpAtOffset(ic->inlineShapeJump), cs);

    /*
     * Record patch offsets for later stub updates. The assert-after-assign
     * pattern checks the offset survives the round trip, i.e. fits in the
     * (presumably narrow) IC field — confirm field widths in the IC struct.
     */
    int offset = linker.locationOf(shapeLabel) - linker.locationOf(start);
    ic->extraShapeGuard = offset;
    JS_ASSERT(ic->extraShapeGuard == offset);

    ic->extraStub = JSC::JITCode(cs.executableAddress(), linker.size());
    offset = linker.locationOf(store) - linker.locationOf(start);
    ic->extraStoreOffset = offset;
    JS_ASSERT(ic->extraStoreOffset == offset);

    ic->hasExtraStub = true;

    return Lookup_Cacheable;
}