/*
 * Since memory has been exhausted, avoid the normal error-handling path which
 * allocates an error object, report and callstack. If code is running, simply
 * throw the static atom "out of memory". If code is not running, call the
 * error reporter directly.
 *
 * Furthermore, callers of js_ReportOutOfMemory (viz., malloc) assume a GC does
 * not occur, so GC must be avoided or suppressed.
 */
void
js_ReportOutOfMemory(ThreadSafeContext *cxArg)
{
    // Only a full JSContext can throw or report; other thread-safe contexts
    // silently drop the OOM report here.
    if (!cxArg->isJSContext())
        return;
    JSContext *cx = cxArg->asJSContext();

    // Record the OOM on the runtime so later code can tell one happened.
    cx->runtime()->hadOutOfMemory = true;

    // If JS is running, throw the preallocated "out of memory" atom; this
    // raises an exception without allocating anything.
    if (JS_IsRunning(cx)) {
        cx->setPendingException(StringValue(cx->names().outOfMemory));
        return;
    }

    /* Get the message for this error, but we don't expand any arguments. */
    const JSErrorFormatString *efs =
        js_GetLocalizedErrorMessage(cx, NULL, NULL, JSMSG_OUT_OF_MEMORY);
    const char *msg = efs ? efs->format : "Out of memory";

    /* Fill out the report, but don't do anything that requires allocation. */
    JSErrorReport report;
    PodZero(&report);
    report.flags = JSREPORT_ERROR;
    report.errorNumber = JSMSG_OUT_OF_MEMORY;
    PopulateReportBlame(cx, &report);

    /* Report the error. */
    if (JSErrorReporter onError = cx->errorReporter) {
        // Suppress GC while inside the embedder's reporter callback, per the
        // no-GC contract described above.
        AutoSuppressGC suppressGC(cx);
        onError(cx, msg, &report);
    }
}
/*
 * Create a new JSRuntime/JSContext pair. Returns nullptr on failure, with all
 * partially-constructed state torn down.
 *
 * Improvement: the original duplicated the identical three-step cleanup
 * (destroyRuntime / delete cx / delete runtime) in two failure paths; the two
 * init calls are now short-circuited into one condition so the cleanup exists
 * exactly once. Behavior is unchanged: if runtime->init() fails, cx->init()
 * is never called, exactly as before.
 */
JSContext*
js::NewContext(uint32_t maxBytes, uint32_t maxNurseryBytes, JSRuntime* parentRuntime)
{
    AutoNoteSingleThreadedRegion anstr;

    // Creating a second context on a thread that already has one is a bug.
    MOZ_RELEASE_ASSERT(!TlsContext.get());

    JSRuntime* runtime = js_new<JSRuntime>(parentRuntime);
    if (!runtime)
        return nullptr;

    JSContext* cx = js_new<JSContext>(runtime, JS::ContextOptions());
    if (!cx) {
        js_delete(runtime);
        return nullptr;
    }

    // Initialize the runtime first, then the context; on either failure the
    // runtime must be explicitly destroyed before deletion.
    if (!runtime->init(cx, maxBytes, maxNurseryBytes) ||
        !cx->init(ContextKind::Cooperative))
    {
        runtime->destroyRuntime();
        js_delete(cx);
        js_delete(runtime);
        return nullptr;
    }

    return cx;
}
// Assert that the current thread is allowed to access data protected by this
// zone-group check. Helper threads are exempt; otherwise access is validated
// against the zone group's ownership state (or, for the atoms zone, against
// the exclusive access lock).
void CheckZoneGroup<Helper>::check() const
{
    // Helper threads have their own access discipline; nothing to check here.
    if (OnHelperThread<Helper>())
        return;

    JSContext* cx = TlsContext.get();
    if (group) {
        if (group->usedByHelperThread()) {
            // A helper thread is using the group: only the owning thread may
            // also touch it.
            MOZ_ASSERT(group->ownedByCurrentThread());
        } else {
            // This check is disabled on windows for the same reason as in
            // CheckActiveThread.
#ifndef XP_WIN
            // In a cooperatively scheduled runtime the active thread is
            // permitted access to all zone groups --- even those it has not
            // entered --- for GC and similar purposes. Since all other
            // cooperative threads are suspended, these accesses are threadsafe
            // if the zone group is not in use by a helper thread.
            //
            // A corollary to this is that suspended cooperative threads may
            // not access anything in a zone group, even zone groups they own,
            // because they're not allowed to interact with the JS API.
            MOZ_ASSERT(CurrentThreadCanAccessRuntime(cx->runtime()));
#endif
        }
    } else {
        // |group| will be null for data in the atoms zone. This is protected
        // by the exclusive access lock.
        MOZ_ASSERT(cx->runtime()->currentThreadHasExclusiveAccess());
    }
}
// Build the census report object: a plain object with four properties
// ("objects", "scripts", "strings", "other"), each filled in by the
// corresponding sub-tally's own report() method. Returns false (with an
// error presumably pending on cx) if any allocation or property definition
// fails; on success stores the new object in |report|.
bool report(Census &census, MutableHandleValue report) {
    JSContext *cx = census.cx;

    RootedObject obj(cx, NewBuiltinClassInstance(cx, &JSObject::class_));
    if (!obj)
        return false;

    // Each sub-report is computed first, then attached; failure of either
    // step aborts the whole report.
    RootedValue objectsReport(cx);
    if (!objects.report(census, &objectsReport) ||
        !JSObject::defineProperty(cx, obj, cx->names().objects, objectsReport))
        return false;

    RootedValue scriptsReport(cx);
    if (!scripts.report(census, &scriptsReport) ||
        !JSObject::defineProperty(cx, obj, cx->names().scripts, scriptsReport))
        return false;

    RootedValue stringsReport(cx);
    if (!strings.report(census, &stringsReport) ||
        !JSObject::defineProperty(cx, obj, cx->names().strings, stringsReport))
        return false;

    RootedValue otherReport(cx);
    if (!other.report(census, &otherReport) ||
        !JSObject::defineProperty(cx, obj, cx->names().other, otherReport))
        return false;

    report.setObject(*obj);
    return true;
}
// Create a new JSContext on |rt|. The first context created on a runtime also
// initializes runtime-wide state (static strings, common atoms, self-hosted
// scripts). Returns NULL on failure.
//
// NOTE(review): the |stackChunkSize| parameter is not referenced anywhere in
// this body — confirm whether callers still need to pass it.
JSContext *
js::NewContext(JSRuntime *rt, size_t stackChunkSize)
{
    JS_AbortIfWrongThread(rt);

    JSContext *cx = js_new<JSContext>(rt);
    if (!cx)
        return NULL;

    JS_ASSERT(cx->findVersion() == JSVERSION_DEFAULT);

    if (!cx->cycleDetectorSet.init()) {
        js_delete(cx);
        return NULL;
    }

    /*
     * Here the GC lock is still held after js_InitContextThreadAndLockGC took it and
     * the GC is not running on another thread.
     */
    bool first = JS_CLIST_IS_EMPTY(&rt->contextList);
    JS_APPEND_LINK(&cx->link, &rt->contextList);

    js_InitRandom(cx);

    /*
     * If cx is the first context on this runtime, initialize well-known atoms,
     * keywords, numbers, strings and self-hosted scripts. If one of these
     * steps should fail, the runtime will be left in a partially initialized
     * state, with zeroes and nulls stored in the default-initialized remainder
     * of the struct. We'll clean the runtime up under DestroyContext, because
     * cx will be "last" as well as "first".
     */
    if (first) {
#ifdef JS_THREADSAFE
        JS_BeginRequest(cx);
#endif
        bool ok = rt->staticStrings.init(cx);
        if (ok)
            ok = InitCommonAtoms(cx);
        if (ok)
            ok = rt->initSelfHosting(cx);
#ifdef JS_THREADSAFE
        JS_EndRequest(cx);
#endif
        if (!ok) {
            DestroyContext(cx, DCM_NEW_FAILED);
            return NULL;
        }
    }

    // Give the embedding a chance to veto the new context.
    JSContextCallback cxCallback = rt->cxCallback;
    if (cxCallback && !cxCallback(cx, JSCONTEXT_NEW)) {
        DestroyContext(cx, DCM_NEW_FAILED);
        return NULL;
    }

    return cx;
}
// Entry point invoked from generated code when an Ion frame bails out.
// Reconstructs the frame state from |sp|, converts the Ion frame to a
// Baseline frame, and returns one of the BAILOUT_RETURN_* codes. On success,
// *bailoutInfo is filled in for the caller.
uint32_t
ion::Bailout(BailoutStack *sp, BaselineBailoutInfo **bailoutInfo)
{
    JS_ASSERT(bailoutInfo);
    JSContext *cx = GetIonContext()->cx;

    // We don't have an exit frame.
    cx->mainThread().ionTop = NULL;

    JitActivationIterator activations(cx->runtime());
    IonBailoutIterator frameIter(activations, sp);
    JitActivation *jitActivation = activations.activation()->asJit();

    IonSpew(IonSpew_Bailouts, "Took bailout! Snapshot offset: %d", frameIter.snapshotOffset());

    JS_ASSERT(IsBaselineEnabled(cx));

    *bailoutInfo = NULL;
    uint32_t status = BailoutIonToBaseline(cx, jitActivation, frameIter, false, bailoutInfo);
    JS_ASSERT(status == BAILOUT_RETURN_OK ||
              status == BAILOUT_RETURN_FATAL_ERROR ||
              status == BAILOUT_RETURN_OVERRECURSED);
    JS_ASSERT_IF(status == BAILOUT_RETURN_OK, *bailoutInfo != NULL);

    // On failure, ensure an exit frame exists so stack walking stays sane.
    if (status != BAILOUT_RETURN_OK)
        EnsureExitFrame(frameIter.jsFrame());

    return status;
}
// Initialize this BaselineFrame from an InterpreterFrame for on-stack
// replacement: copy the scope chain, flags (call object, eval, args object,
// hook data, SPS), return value, and all stack values. Returns false only if
// the debug-mode Debugger notification fails.
bool
BaselineFrame::initForOsr(InterpreterFrame *fp, uint32_t numStackValues)
{
    // Start from a fully zeroed frame; flags/pointers are then set piecemeal.
    mozilla::PodZero(this);

    scopeChain_ = fp->scopeChain();

    if (fp->hasCallObjUnchecked())
        flags_ |= BaselineFrame::HAS_CALL_OBJ;

    if (fp->isEvalFrame()) {
        flags_ |= BaselineFrame::EVAL;
        evalScript_ = fp->script();
    }

    if (fp->script()->needsArgsObj() && fp->hasArgsObj()) {
        flags_ |= BaselineFrame::HAS_ARGS_OBJ;
        argsObj_ = &fp->argsObj();
    }

    if (fp->hasHookData()) {
        flags_ |= BaselineFrame::HAS_HOOK_DATA;
        hookData_ = fp->hookData();
    }

    if (fp->hasReturnValue())
        setReturnValue(fp->returnValue());

    if (fp->hasPushedSPSFrame())
        flags_ |= BaselineFrame::HAS_PUSHED_SPS_FRAME;

    // Frame size covers the fixed header plus the copied expression-stack
    // values.
    frameSize_ = BaselineFrame::FramePointerOffset +
        BaselineFrame::Size() +
        numStackValues * sizeof(Value);

    JS_ASSERT(numValueSlots() == numStackValues);

    for (uint32_t i = 0; i < numStackValues; i++)
        *valueSlot(i) = fp->slots()[i];

    JSContext *cx = GetJSContextFromJitCode();
    if (cx->compartment()->debugMode()) {
        // In debug mode, update any Debugger.Frame objects for the
        // InterpreterFrame to point to the BaselineFrame.

        // The caller pushed a fake return address. ScriptFrameIter, used by the
        // debugger, wants a valid return address, but it's okay to just pick one.
        // In debug mode there's always at least 1 ICEntry (since there are always
        // debug prologue/epilogue calls).
        IonFrameIterator iter(cx);
        JS_ASSERT(iter.returnAddress() == nullptr);
        BaselineScript *baseline = fp->script()->baselineScript();
        iter.current()->setReturnAddress(baseline->returnAddressForIC(baseline->icEntry(0)));

        if (!Debugger::handleBaselineOsr(cx, fp, this))
            return false;
    }

    return true;
}
// React to a change of this compartment's debug mode: re-enable/disable the
// JIT on every context running in this compartment, and (method-JIT builds)
// discard or schedule-for-drop any JIT code whose debugMode no longer matches.
//
// Improvement: the loop variable previously shadowed the |cx| parameter
// (both named cx), which made the method-JIT code below — which uses the
// *parameter* — easy to misread. The inner variable is renamed.
void
JSCompartment::updateForDebugMode(JSContext *cx)
{
    for (ThreadContextRange r(cx); !r.empty(); r.popFront()) {
        JSContext *iterCx = r.front();
        if (iterCx->compartment == this)
            iterCx->updateJITEnabled();
    }

#ifdef JS_METHODJIT
    bool enabled = debugMode();

    if (enabled) {
        JS_ASSERT(!hasScriptsOnStack(cx));
    } else if (hasScriptsOnStack(cx)) {
        // Can't drop code while scripts are running; defer to a later sweep.
        hasDebugModeCodeToDrop = true;
        return;
    }

    /*
     * Discard JIT code for any scripts that change debugMode. This assumes
     * that 'comp' is in the same thread as 'cx'.
     */
    for (gc::CellIter i(cx, this, gc::FINALIZE_SCRIPT); !i.done(); i.next()) {
        JSScript *script = i.get<JSScript>();
        if (script->debugMode != enabled) {
            mjit::ReleaseScriptCode(cx, script);
            script->debugMode = enabled;
        }
    }
    hasDebugModeCodeToDrop = false;
#endif
}
// Convert an AcceptDict into a JS object with "error" and "timeout"
// properties; the timeout duration is exported as a number of ticks.
JSObject AcceptDict_to_js(const JSContext& js_context, const AcceptDict& config)
{
    auto result = js_context.CreateObject();
    result.SetProperty("error", config.error);
    const auto timeout_count = static_cast<double>(config.timeout.count());
    result.SetProperty("timeout", js_context.CreateNumber(timeout_count));
    return result;
}
// After an Ion bailout caused by a type-inference mismatch, push the observed
// value(s) back into the type-inference system so recompiled code sees the
// new types. Always returns true (as a uint32).
uint32
ion::ReflowTypeInfo(uint32 bailoutResult)
{
    JSContext *cx = GetIonContext()->cx;
    IonActivation *activation = cx->runtime->ionActivation;

    IonSpew(IonSpew_Bailouts, "reflowing type info");

    if (bailoutResult == BAILOUT_RETURN_ARGUMENT_CHECK) {
        // Argument-check bailout: re-record the types of all arguments.
        IonSpew(IonSpew_Bailouts, "reflowing type info at argument-checked entry");
        ReflowArgTypes(cx);
        return true;
    }

    RootedScript script(cx, cx->fp()->script());
    jsbytecode *pc = activation->bailout()->bailoutPc();

    // The bailing bytecode must be one that carries a type set.
    JS_ASSERT(js_CodeSpec[*pc].format & JOF_TYPESET);

    IonSpew(IonSpew_Bailouts, "reflowing type info at %s:%d pcoff %d", script->filename,
            script->lineno, pc - script->code);

    types::AutoEnterTypeInference enter(cx);
    if (bailoutResult == BAILOUT_RETURN_TYPE_BARRIER)
        script->analysis()->breakTypeBarriers(cx, pc - script->code, false);
    else
        JS_ASSERT(bailoutResult == BAILOUT_RETURN_MONITOR);

    // When a type barrier fails, the bad value is at the top of the stack.
    Value &result = cx->regs().sp[-1];
    types::TypeScript::Monitor(cx, script, pc, result);

    return true;
}
// Convert the accumulated characters into a flat string, choosing the
// cheapest representation: the empty atom for no characters, an inline short
// string when the length fits, otherwise a heap string that adopts the
// buffer. Returns a null UnrootedFlatString on failure.
UnrootedFlatString
StringBuffer::finishString()
{
    JSContext *cx = context();

    if (cb.empty())
        return UnrootedFlatString(cx->names().empty);

    size_t length = cb.length();
    if (!JSString::validateLength(cx, length))
        return UnrootedFlatString();

    JS_STATIC_ASSERT(JSShortString::MAX_SHORT_LENGTH < CharBuffer::InlineLength);
    if (JSShortString::lengthFits(length))
        return NewShortString<CanGC>(cx, TwoByteChars(cb.begin(), length));

    // The new string needs a trailing NUL; append it before extracting the
    // raw buffer.
    if (!cb.append('\0'))
        return UnrootedFlatString();

    jschar *buf = extractWellSized();
    if (!buf)
        return UnrootedFlatString();

    // js_NewString takes ownership of |buf| on success; on failure we must
    // free it ourselves.
    JSFlatString *str = js_NewString<CanGC>(cx, buf, length);
    if (!str)
        js_free(buf);

    return str;
}
/*
 * Given a frame newer than the entry frame, try to finish it. If it's at a
 * return position, pop the frame. If it's at a safe point, execute it in
 * Jaeger code. Otherwise, try to interpret until a safe point.
 *
 * While this function is guaranteed to make progress, it may not actually
 * finish or pop the current frame. It can either:
 *   1) Finalize a finished frame, or
 *   2) Finish and finalize the frame in the Method JIT, or
 *   3) Interpret, which can:
 *      a) Propagate an error, or
 *      b) Finish the frame, but not finalize it, or
 *      c) Abruptly leave at any point in the frame, or in a newer frame
 *         pushed by a call, that has method JIT'd code.
 */
static bool
EvaluateExcessFrame(VMFrame &f, JSStackFrame *entryFrame)
{
    JSContext *cx = f.cx;
    JSStackFrame *fp = cx->fp();

    /*
     * A "finished" frame is when the interpreter rested on a STOP,
     * RETURN, RETRVAL, etc. We check for finished frames BEFORE looking
     * for a safe point. If the frame was finished, we could have already
     * called ScriptEpilogue(), and entering the JIT could call it twice.
     */
    if (!fp->hasImacropc() && FrameIsFinished(cx))
        return HandleFinishedFrame(f, entryFrame);

    // At a safe point: run the method-JIT code, then pop back to the caller.
    if (void *ncode = AtSafePoint(cx)) {
        if (!JaegerShotAtSafePoint(cx, ncode))
            return false;
        InlineReturn(f);
        AdvanceReturnPC(cx);
        return true;
    }

    // Otherwise make progress in the interpreter until a safe point.
    return PartialInterpret(f);
}
// Convert a MediaQueryInfoType into a JS object exposing its "exact" flag
// and "value" fields.
JSObject MediaQueryInfoType_to_js(const JSContext& js_context, const MediaQueryInfoType& config)
{
    auto result = js_context.CreateObject();
    result.SetProperty("exact", js_context.CreateBoolean(config.exact));
    result.SetProperty("value", config.value);
    return result;
}
// Take ownership of the buffer's raw character storage, shrinking it first
// when the allocation is much larger than the content. Returns NULL on
// failure (the raw buffer is freed in that case).
jschar *
StringBuffer::extractWellSized()
{
    size_t cap = cb.capacity();
    size_t len = cb.length();

    jschar *chars = cb.extractRawBuffer();
    if (!chars)
        return NULL;

    /* For medium/big buffers, avoid wasting more than 1/4 of the memory. */
    JS_ASSERT(cap >= len);
    bool wasteful = len > CharBuffer::sMaxInlineStorage && cap - len > len / 4;
    if (wasteful) {
        size_t bytes = sizeof(jschar) * (len + 1);
        JSContext *cx = context();
        jschar *shrunk = (jschar *)cx->realloc_(chars, bytes);
        if (!shrunk) {
            js_free(chars);
            return NULL;
        }
        chars = shrunk;
    }

    return chars;
}
// Return the display atom for the current frame's function. For frames that
// may be missing (profiler not enabled when the module started) a fixed
// explanatory message is atomized instead. If atomization fails, the pending
// exception is cleared and the empty atom is returned.
JSAtom*
FrameIterator::functionDisplayAtom() const
{
    MOZ_ASSERT(!done());

    JSContext* cx = activation_->cx();

    JSAtom* atom;
    if (missingFrameMessage_) {
        const char* msg = "asm.js/wasm frames may be missing; enable the profiler before running "
                          "to see all frames";
        atom = Atomize(cx, msg, strlen(msg));
    } else {
        MOZ_ASSERT(codeRange_);
        atom = code_->getFuncDefAtom(cx, codeRange_->funcDefIndex());
    }

    // Shared fallback: never propagate an OOM from a diagnostic lookup.
    if (!atom) {
        cx->clearPendingException();
        return cx->names().empty;
    }

    return atom;
}
// Entry point invoked from generated code when an Ion frame bails out.
// If Baseline is enabled, the Ion frame is converted into a Baseline frame
// (filling *bailoutInfo); otherwise the legacy ConvertFrames path is used.
// Returns a BAILOUT_RETURN_* code.
//
// Improvement: removed a block of dead, commented-out code (an obsolete
// FrameRecovery-based activation lookup) that obscured the real activation
// retrieval. No behavioral change.
uint32_t
ion::Bailout(BailoutStack *sp, BaselineBailoutInfo **bailoutInfo)
{
    JS_ASSERT(bailoutInfo);
    JSContext *cx = GetIonContext()->cx;

    // We don't have an exit frame.
    cx->mainThread().ionTop = NULL;
    IonActivationIterator ionActivations(cx);
    IonBailoutIterator iter(ionActivations, sp);
    IonActivation *activation = ionActivations.activation();

    IonSpew(IonSpew_Bailouts, "Took bailout! Snapshot offset: %d", iter.snapshotOffset());

    uint32_t retval;
    if (IsBaselineEnabled(cx)) {
        *bailoutInfo = NULL;
        retval = BailoutIonToBaseline(cx, activation, iter, false, bailoutInfo);
        JS_ASSERT(retval == BAILOUT_RETURN_BASELINE ||
                  retval == BAILOUT_RETURN_FATAL_ERROR ||
                  retval == BAILOUT_RETURN_OVERRECURSED);
        JS_ASSERT_IF(retval == BAILOUT_RETURN_BASELINE, *bailoutInfo != NULL);
    } else {
        retval = ConvertFrames(cx, activation, iter);
    }

    // Unless we are resuming in Baseline, make sure an exit frame exists so
    // stack walking stays sane.
    if (retval != BAILOUT_RETURN_BASELINE)
        EnsureExitFrame(iter.jsFrame());
    return retval;
}
// Mark the current (JIT) activation inactive on behalf of asm.js code.
static void
DisableActivationFromAsmJS(AsmJSActivation *activation)
{
    JSContext *cx = activation->cx();
    Activation *current = cx->mainThread().activation();
    JS_ASSERT(current->isJit());
    current->asJit()->setActive(cx, false);
}
/*
 * Since memory has been exhausted, avoid the normal error-handling path which
 * allocates an error object, report and callstack. If code is running, simply
 * throw the static atom "out of memory". If code is not running, call the
 * error reporter directly.
 *
 * Furthermore, callers of js_ReportOutOfMemory (viz., malloc) assume a GC does
 * not occur, so GC must be avoided or suppressed.
 */
void
js_ReportOutOfMemory(ThreadSafeContext *cxArg)
{
#ifdef JS_MORE_DETERMINISTIC
    /*
     * OOMs are non-deterministic, especially across different execution modes
     * (e.g. interpreter vs JIT). In more-deterministic builds, print to stderr
     * so that the fuzzers can detect this.
     */
    fprintf(stderr, "js_ReportOutOfMemory called\n");
#endif

    // A ForkJoin worker cannot throw; record a fatal parallel abort instead.
    if (cxArg->isForkJoinSlice()) {
        cxArg->asForkJoinSlice()->setPendingAbortFatal(ParallelBailoutOutOfMemory);
        return;
    }

    // Only a full JSContext can throw or report; other thread-safe contexts
    // silently drop the OOM report.
    if (!cxArg->isJSContext())
        return;
    JSContext *cx = cxArg->asJSContext();

    cx->runtime()->hadOutOfMemory = true;

    // If JS is running, throw the preallocated "out of memory" atom; this
    // raises an exception without allocating anything.
    if (JS_IsRunning(cx)) {
        cx->setPendingException(StringValue(cx->names().outOfMemory));
        return;
    }

    /* Get the message for this error, but we don't expand any arguments. */
    const JSErrorFormatString *efs =
        js_GetLocalizedErrorMessage(cx, nullptr, nullptr, JSMSG_OUT_OF_MEMORY);
    const char *msg = efs ? efs->format : "Out of memory";

    /* Fill out the report, but don't do anything that requires allocation. */
    JSErrorReport report;
    PodZero(&report);
    report.flags = JSREPORT_ERROR;
    report.errorNumber = JSMSG_OUT_OF_MEMORY;
    PopulateReportBlame(cx, &report);

    /* Report the error. */
    if (JSErrorReporter onError = cx->errorReporter) {
        AutoSuppressGC suppressGC(cx);
        onError(cx, msg, &report);
    }

    /*
     * We would like to enforce the invariant that any exception reported
     * during an OOM situation does not require wrapping. Besides avoiding
     * allocation when memory is low, this reduces the number of places where
     * we might need to GC.
     *
     * When JS code is running, we set the pending exception to an atom, which
     * does not need wrapping. If no JS code is running, no exception should be
     * set at all.
     */
    JS_ASSERT(!cx->isExceptionPending());
}
// Convert a Windows.Foundation.Rect into a JS object with numeric
// "width", "height", "x" and "y" properties.
JSObject AppModule::RectToJS(const JSContext& js_context, const Windows::Foundation::Rect& rect)
{
    auto result = js_context.CreateObject();
    result.SetProperty("width", js_context.CreateNumber(rect.Width));
    result.SetProperty("height", js_context.CreateNumber(rect.Height));
    result.SetProperty("x", js_context.CreateNumber(rect.X));
    result.SetProperty("y", js_context.CreateNumber(rect.Y));
    return result;
}
// Serialize (XDR_ENCODE) or deserialize an atom. The encoded form is a
// uint32 character count followed by the little-endian jschar data. On
// decode, atomization deduplicates against already-existing atoms.
bool
js::XDRAtom(XDRState<mode> *xdr, MutableHandleAtom atomp)
{
    if (mode == XDR_ENCODE) {
        uint32_t nchars = atomp->length();
        if (!xdr->codeUint32(&nchars))
            return false;

        jschar *chars = const_cast<jschar *>(atomp->getChars(xdr->cx()));
        if (!chars)
            return false;

        return xdr->codeChars(chars, nchars);
    }

    /* Avoid JSString allocation for already existing atoms. See bug 321985. */
    uint32_t nchars;
    if (!xdr->codeUint32(&nchars))
        return false;

    JSContext *cx = xdr->cx();
    JSAtom *atom;
#if IS_LITTLE_ENDIAN
    /* Directly access the little endian chars in the XDR buffer. */
    const jschar *chars = reinterpret_cast<const jschar *>(xdr->buf.read(nchars * sizeof(jschar)));
    atom = AtomizeChars<CanGC>(cx, chars, nchars);
#else
    /*
     * We must copy chars to a temporary buffer to convert between little and
     * big endian data.
     */
    jschar *chars;
    jschar stackChars[256];
    if (nchars <= ArrayLength(stackChars)) {
        chars = stackChars;
    } else {
        /*
         * This is very uncommon. Don't use the tempLifoAlloc arena for this as
         * most allocations here will be bigger than tempLifoAlloc's default
         * chunk size.
         */
        chars = cx->runtime()->pod_malloc<jschar>(nchars);
        if (!chars)
            return false;
    }

    JS_ALWAYS_TRUE(xdr->codeChars(chars, nchars));
    atom = AtomizeChars<CanGC>(cx, chars, nchars);
    if (chars != stackChars)
        js_free(chars);
#endif /* !IS_LITTLE_ENDIAN */

    if (!atom)
        return false;
    atomp.set(atom);
    return true;
}
// On scope exit, run the destroy-script hook for the evaluated script and,
// if it is a viable cache candidate, (re)insert it into the runtime's eval
// cache keyed by source string, caller script, and pc.
~EvalScriptGuard() {
    if (script_) {
        CallDestroyScriptHook(cx_->runtime()->defaultFreeOp(), script_);
        script_->cacheForEval();
        EvalCacheEntry cacheEntry = {script_, lookup_.callerScript, lookup_.pc};
        // Restore the source string into the lookup key before re-adding;
        // lookupInEvalCache may have removed the old entry.
        lookup_.str = lookupStr_;
        if (lookup_.str && IsEvalCacheCandidate(script_))
            cx_->runtime()->evalCache.relookupOrAdd(p_, lookup_, cacheEntry);
    }
}
// Tear down an asm.js activation: leave the profiler's native scope and
// unlink this activation from the per-thread stack while holding the
// interrupt lock.
AsmJSActivation::~AsmJSActivation()
{
    if (profiler_)
        profiler_->exitNative();

    JSContext *jscx = cx_->asJSContext();
    JS_ASSERT(jscx->mainThread().asmJSActivationStack_ == this);

    // Take the interrupt lock before unlinking from the activation stack.
    JSRuntime::AutoLockForInterrupt lock(jscx->runtime());

    jscx->mainThread().asmJSActivationStack_ = prevAsmJS_;
}
// Destroy an XDR state: run the ops-specific finalizer, free the class
// registry and its hash table (if present), then free the state itself.
JS_XDRDestroy(JSXDRState *xdr)
{
    JSContext *cx = xdr->cx;
    // Ops-specific cleanup first (e.g. releasing the underlying buffer).
    xdr->ops->finalize(xdr);
    if (xdr->registry) {
        cx->free_(xdr->registry);
        if (xdr->reghash)
            JS_DHashTableDestroy((JSDHashTable *) xdr->reghash);
    }
    cx->free_(xdr);
}
// Push and run a new frame for an uncached (not inline-cached) call from
// method-JIT code. On success, either *pret is set to the callee's JIT entry
// point (caller should jump there) or the callee was fully interpreted and
// *pret is NULL. Sets *unjittable when compilation deliberately aborted.
// Returns false on error (including stack-limit failure).
static inline bool
UncachedInlineCall(VMFrame &f, uint32 flags, void **pret, bool *unjittable, uint32 argc)
{
    JSContext *cx = f.cx;
    // The callee and |this| sit below the argc arguments on the stack.
    Value *vp = f.regs.sp - (argc + 2);
    JSObject &callee = vp->toObject();
    JSFunction *newfun = callee.getFunctionPrivate();
    JSScript *newscript = newfun->script();

    /* Get pointer to new frame/slots, prepare arguments. */
    StackSpace &stack = cx->stack();
    JSStackFrame *newfp = stack.getInlineFrameWithinLimit(cx, f.regs.sp, argc,
                                                          newfun, newscript, &flags,
                                                          f.entryfp, &f.stackLimit);
    if (JS_UNLIKELY(!newfp))
        return false;

    /* Initialize frame, locals. */
    newfp->initCallFrame(cx, callee, newfun, argc, flags);
    SetValueRangeToUndefined(newfp->slots(), newscript->nfixed);

    /* Officially push the frame. */
    stack.pushInlineFrame(cx, newscript, newfp, &f.regs);
    JS_ASSERT(newfp == f.regs.fp);

    /* Scope with a call object parented by callee's parent. */
    if (newfun->isHeavyweight() && !js::CreateFunCallObject(cx, newfp))
        return false;

    /* Try to compile if not already compiled. */
    if (newscript->getJITStatus(newfp->isConstructing()) == JITScript_None) {
        CompileStatus status = CanMethodJIT(cx, newscript, newfp, CompileRequest_Interpreter);
        if (status == Compile_Error) {
            /* A runtime exception was thrown, get out. */
            InlineReturn(f);
            return false;
        }
        if (status == Compile_Abort)
            *unjittable = true;
    }

    /* If newscript was successfully compiled, run it. */
    if (JITScript *jit = newscript->getJIT(newfp->isConstructing())) {
        *pret = jit->invokeEntry;
        return true;
    }

    /* Otherwise, run newscript in the interpreter. */
    bool ok = !!Interpret(cx, cx->fp());
    InlineReturn(f);

    *pret = NULL;
    return ok;
}
// Entry point invoked from generated code when an Ion frame bails out.
// Converts the Ion frame to a Baseline frame (filling *bailoutInfo on
// success) and returns a BAILOUT_RETURN_* code. On failure, pops any SPS
// entry for the frame and installs an exit frame before returning.
uint32_t
jit::Bailout(BailoutStack *sp, BaselineBailoutInfo **bailoutInfo)
{
    JSContext *cx = GetJSContextFromJitCode();
    JS_ASSERT(bailoutInfo);

    // We don't have an exit frame.
    MOZ_ASSERT(IsInRange(FAKE_JIT_TOP_FOR_BAILOUT, 0, 0x1000) &&
               IsInRange(FAKE_JIT_TOP_FOR_BAILOUT + sizeof(IonCommonFrameLayout), 0, 0x1000),
               "Fake jitTop pointer should be within the first page.");
    cx->mainThread().jitTop = FAKE_JIT_TOP_FOR_BAILOUT;

    // No GC while the bailout machinery walks and rewrites frames.
    gc::AutoSuppressGC suppress(cx);

    JitActivationIterator jitActivations(cx->runtime());
    IonBailoutIterator iter(jitActivations, sp);
    JitActivation *activation = jitActivations->asJit();

    TraceLogger *logger = TraceLoggerForMainThread(cx->runtime());
    TraceLogTimestamp(logger, TraceLogger::Bailout);

    JitSpew(JitSpew_IonBailouts, "Took bailout! Snapshot offset: %d", iter.snapshotOffset());

    JS_ASSERT(IsBaselineEnabled(cx));

    *bailoutInfo = nullptr;
    uint32_t retval = BailoutIonToBaseline(cx, activation, iter, false, bailoutInfo);
    JS_ASSERT(retval == BAILOUT_RETURN_OK ||
              retval == BAILOUT_RETURN_FATAL_ERROR ||
              retval == BAILOUT_RETURN_OVERRECURSED);
    JS_ASSERT_IF(retval == BAILOUT_RETURN_OK, *bailoutInfo != nullptr);

    if (retval != BAILOUT_RETURN_OK) {
        // If the bailout failed, then bailout trampoline will pop the
        // current frame and jump straight to exception handling code when
        // this function returns.  Any SPS entry pushed for this frame will
        // be silently forgotten.
        //
        // We call ExitScript here to ensure that if the ionScript had SPS
        // instrumentation, then the SPS entry for it is popped.
        //
        // However, if the bailout was during argument check, then a
        // pseudostack frame would not have been pushed in the first
        // place, so don't pop anything in that case.
        bool popSPSFrame = iter.ionScript()->hasSPSInstrumentation() &&
                           (SnapshotIterator(iter).bailoutKind() != Bailout_ArgumentCheck);
        JSScript *script = iter.script();
        probes::ExitScript(cx, script, script->functionNonDelazifying(), popSPSFrame);

        EnsureExitFrame(iter.jsFrame());
    }

    return retval;
}
// Accumulate memory-reporter sizes for this runtime (and its single
// JSContext) into |rtSizes|, covering atoms, caches, interpreter stack,
// script data, JIT code and GC structures.
void
JSRuntime::addSizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf, JS::RuntimeSizes* rtSizes)
{
    // Several tables in the runtime enumerated below can be used off thread.
    AutoLockForExclusiveAccess lock(this);

    // For now, measure the size of the derived class (JSContext).
    // TODO (bug 1281529): make memory reporting reflect the new
    // JSContext/JSRuntime world better.
    JSContext* cx = unsafeContextFromAnyThread();
    rtSizes->object += mallocSizeOf(cx);

    rtSizes->atomsTable += atoms(lock).sizeOfIncludingThis(mallocSizeOf);

    // Only the top-level runtime owns the permanent atoms and well-known
    // names; child runtimes share them.
    if (!parentRuntime) {
        rtSizes->atomsTable += mallocSizeOf(staticStrings);
        rtSizes->atomsTable += mallocSizeOf(commonNames);
        rtSizes->atomsTable += permanentAtoms->sizeOfIncludingThis(mallocSizeOf);
    }

    rtSizes->contexts += cx->sizeOfExcludingThis(mallocSizeOf);

    rtSizes->temporary += tempLifoAlloc.sizeOfExcludingThis(mallocSizeOf);

    rtSizes->interpreterStack += interpreterStack_.sizeOfExcludingThis(mallocSizeOf);

    if (MathCache* cache = cx->caches.maybeGetMathCache())
        rtSizes->mathCache += cache->sizeOfIncludingThis(mallocSizeOf);

    if (sharedImmutableStrings_) {
        rtSizes->sharedImmutableStringsCache +=
            sharedImmutableStrings_->sizeOfExcludingThis(mallocSizeOf);
    }

    rtSizes->uncompressedSourceCache +=
        cx->caches.uncompressedSourceCache.sizeOfExcludingThis(mallocSizeOf);

    // Table overhead plus the per-entry script data allocations.
    rtSizes->scriptData += scriptDataTable(lock).sizeOfExcludingThis(mallocSizeOf);
    for (ScriptDataTable::Range r = scriptDataTable(lock).all(); !r.empty(); r.popFront())
        rtSizes->scriptData += mallocSizeOf(r.front());

    if (jitRuntime_) {
        jitRuntime_->execAlloc().addSizeOfCode(&rtSizes->code);
        jitRuntime_->backedgeExecAlloc().addSizeOfCode(&rtSizes->code);
    }

    rtSizes->gc.marker += gc.marker.sizeOfExcludingThis(mallocSizeOf);
    rtSizes->gc.nurseryCommitted += gc.nursery.sizeOfHeapCommitted();
    rtSizes->gc.nurseryMallocedBuffers += gc.nursery.sizeOfMallocedBuffers(mallocSizeOf);
    gc.storeBuffer.addSizeOfExcludingThis(mallocSizeOf, &rtSizes->gc);
}
// Atomize the accumulated characters and reset the buffer. An empty buffer
// yields the preallocated empty atom; otherwise the result of AtomizeChars
// is returned as-is (which may be null on failure), with the buffer cleared
// either way after atomization.
JSAtom *
StringBuffer::finishAtom()
{
    JSContext *cx = context();

    size_t len = cb.length();
    if (len == 0)
        return cx->names().empty;

    JSAtom *result = AtomizeChars(cx, cb.begin(), len);
    cb.clear();
    return result;
}
// Convert a CameraMediaItemType into a JS object exposing its result code,
// crop rectangle, error string, optional media payload, media type and
// success flag.
JSObject CameraMediaItemType_to_js(const JSContext& js_context, const CameraMediaItemType& config)
{
    auto result = js_context.CreateObject();
    result.SetProperty("code", js_context.CreateNumber(config.code));
    result.SetProperty("cropRect", Titanium::UI::Dimension_to_js(js_context, config.cropRect));
    result.SetProperty("error", js_context.CreateString(config.error));
    // "media" is only attached when a payload object exists.
    if (config.media != nullptr) {
        result.SetProperty("media", config.media->get_object());
    }
    result.SetProperty("mediaType",
                       js_context.CreateNumber(static_cast<std::uint32_t>(config.mediaType)));
    result.SetProperty("success", js_context.CreateBoolean(config.success));
    return result;
}
// Probe the runtime's eval cache for a script previously compiled from
// |str| at (callerScript, pc) under the current version. On a hit, the
// entry is taken *out* of the cache (it will be re-added by the destructor
// if still eligible) and the script is prepared for re-execution.
void lookupInEvalCache(JSLinearString *str, JSScript *callerScript, jsbytecode *pc)
{
    // Remember the string separately; the destructor restores it into the
    // lookup key before re-inserting.
    lookupStr_ = str;
    lookup_.str = str;
    lookup_.callerScript = callerScript;
    lookup_.version = cx_->findVersion();
    lookup_.pc = pc;
    p_ = cx_->runtime()->evalCache.lookupForAdd(lookup_);
    if (p_) {
        script_ = p_->script;
        // Remove the entry while the script is in use.
        cx_->runtime()->evalCache.remove(p_);
        script_->uncacheForEval();
    }
}
/*
 * Evaluate frames newer than the entry frame until all are gone. This will
 * always leave f.regs.fp == entryFrame.
 */
static bool
FinishExcessFrames(VMFrame &f, JSStackFrame *entryFrame)
{
    JSContext *cx = f.cx;
    while (cx->fp() != entryFrame || entryFrame->hasImacropc()) {
        // On evaluation failure, give error handling a chance to recover;
        // only propagate failure if that also fails.
        if (!EvaluateExcessFrame(f, entryFrame) &&
            !HandleErrorInExcessFrame(f, entryFrame)) {
            return false;
        }
    }
    return true;
}