void
js::DestroyContext(JSContext *cx, DestroyContextMode mode)
{
    JSRuntime *rt = cx->runtime;
    JS_AbortIfWrongThread(rt);

#ifdef JS_THREADSAFE
    if (cx->outstandingRequests != 0)
        MOZ_CRASH();
#endif

    if (mode != DCM_NEW_FAILED) {
        if (JSContextCallback cxCallback = rt->cxCallback) {
            /*
             * JSCONTEXT_DESTROY callback is not allowed to fail and must
             * return true.
             */
            JS_ALWAYS_TRUE(cxCallback(cx, JSCONTEXT_DESTROY));
        }
    }

    cx->remove();
    bool last = !rt->hasContexts();
    if (last) {
        JS_ASSERT(!rt->isHeapBusy());

        /*
         * Dump remaining type inference results first. This printing
         * depends on atoms still existing.
         */
        for (CompartmentsIter c(rt); !c.done(); c.next())
            c->types.print(cx, false);

        /* Off thread ion compilations depend on atoms still existing. */
        for (CompartmentsIter c(rt); !c.done(); c.next())
            CancelOffThreadIonCompile(c, NULL);

        /* Unpin all common names before final GC. */
        FinishCommonNames(rt);

        /* Clear debugging state to remove GC roots. */
        for (CompartmentsIter c(rt); !c.done(); c.next())
            c->clearTraps(rt->defaultFreeOp());
        JS_ClearAllWatchPoints(cx);

        /* Clear the statics table to remove GC roots. */
        rt->staticStrings.finish();

        rt->finishSelfHosting();

        JS::PrepareForFullGC(rt);
        GC(rt, GC_NORMAL, JS::gcreason::LAST_CONTEXT);
    } else if (mode == DCM_FORCE_GC) {
        JS_ASSERT(!rt->isHeapBusy());
        JS::PrepareForFullGC(rt);
        GC(rt, GC_NORMAL, JS::gcreason::DESTROY_CONTEXT);
    }
    js_delete_poison(cx);
}
void
js::DestroyContext(JSContext *cx, DestroyContextMode mode)
{
    JSRuntime *rt = cx->runtime;
    JS_AbortIfWrongThread(rt);

    JS_ASSERT(!cx->enumerators);

#ifdef JS_THREADSAFE
    JS_ASSERT(cx->outstandingRequests == 0);
#endif

    if (mode != DCM_NEW_FAILED) {
        if (JSContextCallback cxCallback = rt->cxCallback) {
            /*
             * JSCONTEXT_DESTROY callback is not allowed to fail and must
             * return true.
             */
            JS_ALWAYS_TRUE(cxCallback(cx, JSCONTEXT_DESTROY));
        }
    }

    JS_REMOVE_LINK(&cx->link);
    bool last = !rt->hasContexts();
    if (last) {
        JS_ASSERT(!rt->isHeapBusy());

        /*
         * Dump remaining type inference results first. This printing
         * depends on atoms still existing.
         */
        for (CompartmentsIter c(rt); !c.done(); c.next())
            c->types.print(cx, false);

        /* Unpin all common atoms before final GC. */
        FinishCommonAtoms(rt);

        /* Clear debugging state to remove GC roots. */
        for (CompartmentsIter c(rt); !c.done(); c.next())
            c->clearTraps(rt->defaultFreeOp());
        JS_ClearAllWatchPoints(cx);

        PrepareForFullGC(rt);
        GC(rt, GC_NORMAL, gcreason::LAST_CONTEXT);
    } else if (mode == DCM_FORCE_GC) {
        JS_ASSERT(!rt->isHeapBusy());
        PrepareForFullGC(rt);
        GC(rt, GC_NORMAL, gcreason::DESTROY_CONTEXT);
    }

    Foreground::delete_(cx);
}
template <AllowGC allowGC>  // allowGC: template parameter selecting fallible vs. GC-triggering allocation
/* static */ void *
GCRuntime::refillFreeListFromMainThread(JSContext *cx, AllocKind thingKind)
{
    JSRuntime *rt = cx->runtime();
    MOZ_ASSERT(!rt->isHeapBusy(), "allocating while under GC");
    MOZ_ASSERT_IF(allowGC, !rt->currentThreadHasExclusiveAccess());

    // Try to allocate; synchronize with background GC threads if necessary.
    void *thing = tryRefillFreeListFromMainThread(cx, thingKind);
    if (MOZ_LIKELY(thing))
        return thing;

    // Perform a last-ditch GC to hopefully free up some memory.
    {
        // If we are doing a fallible allocation, percolate up the OOM
        // instead of reporting it.
        if (!allowGC)
            return nullptr;

        JS::PrepareForFullGC(rt);
        AutoKeepAtoms keepAtoms(cx->perThreadData);
        rt->gc.gc(GC_SHRINK, JS::gcreason::LAST_DITCH);
    }

    // Retry the allocation after the last-ditch GC.
    thing = tryRefillFreeListFromMainThread(cx, thingKind);
    if (thing)
        return thing;

    // We are really just totally out of memory.
    MOZ_ASSERT(allowGC, "A fallible allocation must not report OOM on failure.");
    ReportOutOfMemory(cx);
    return nullptr;
}
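A minimal, self-contained sketch of the refill control flow above, assuming invented stand-ins (TryAllocate, LastDitchGC, RefillFreeList are not engine APIs): a fallible allocation (NoGC) propagates failure to the caller immediately, while an infallible one (CanGC) runs a last-ditch GC, retries once, and only then reports OOM.

#include <cstdio>
#include <cstdlib>

enum AllowGC { NoGC, CanGC };

static void* TryAllocate() { return std::malloc(32); }  // stand-in allocator
static void LastDitchGC() { /* a real engine would release collectable memory here */ }

template <AllowGC allowGC>
static void* RefillFreeList()
{
    if (void* thing = TryAllocate())
        return thing;
    if (allowGC == NoGC)
        return nullptr;                  // percolate the OOM up to the caller
    LastDitchGC();                       // shrinking GC as a last resort
    if (void* thing = TryAllocate())
        return thing;
    std::fprintf(stderr, "out of memory\n");
    return nullptr;
}

int main()
{
    void* p = RefillFreeList<CanGC>();   // infallible path: may GC and retry
    std::free(p);
}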
void
js::IterateGrayObjects(Zone* zone, GCThingCallback cellCallback, void* data)
{
    JSRuntime* rt = zone->runtimeFromMainThread();
    MOZ_ASSERT(!rt->isHeapBusy());
    AutoPrepareForTracing prep(rt->contextFromMainThread(), SkipAtoms);
    ::IterateGrayObjects(zone, cellCallback, data);
}
/* static */ void*
GCRuntime::refillFreeListOffMainThread(ExclusiveContext* cx, AllocKind thingKind)
{
    ArenaLists* arenas = cx->arenas();
    Zone* zone = cx->zone();
    JSRuntime* rt = zone->runtimeFromAnyThread();

    AutoMaybeStartBackgroundAllocation maybeStartBGAlloc;

    // If we're off the main thread, we try to allocate once and return
    // whatever value we get. We need to first ensure the main thread is not
    // in a GC session.
    AutoLockHelperThreadState lock;
    while (rt->isHeapBusy())
        HelperThreadState().wait(GlobalHelperThreadState::PRODUCER);

    return arenas->allocateFromArena(zone, thingKind, maybeStartBGAlloc);
}
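The wait loop above is the standard condition-variable idiom: re-test the predicate after every wakeup. A stand-alone sketch of that pattern using std::condition_variable rather than SpiderMonkey's helper-thread lock (HeapState, heapBusy, and allocateOffMainThread are invented names for illustration): an off-main-thread allocator blocks until the main thread's GC session ends, then makes a single allocation attempt.

#include <condition_variable>
#include <cstdio>
#include <cstdlib>
#include <mutex>
#include <thread>

struct HeapState {
    std::mutex lock;
    std::condition_variable gcFinished;  // notified when a GC session ends
    bool heapBusy = true;                // stand-in for rt->isHeapBusy()
};

void* allocateOffMainThread(HeapState& heap)
{
    std::unique_lock<std::mutex> guard(heap.lock);
    // The predicate is re-tested after every wakeup (handling spurious
    // wakeups), mirroring the while (rt->isHeapBusy()) loop above.
    heap.gcFinished.wait(guard, [&] { return !heap.heapBusy; });
    return std::malloc(32);              // single allocation attempt
}

int main()
{
    HeapState heap;
    std::thread helper([&] { std::free(allocateOffMainThread(heap)); });
    {
        // Main thread finishes its "GC session" and wakes the helper.
        std::lock_guard<std::mutex> guard(heap.lock);
        heap.heapBusy = false;
    }
    heap.gcFinished.notify_all();
    helper.join();
    std::printf("helper allocation completed after GC\n");
}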
void
js::DestroyContext(JSContext *cx, DestroyContextMode mode)
{
    JSRuntime *rt = cx->runtime();
    JS_AbortIfWrongThread(rt);

#ifdef JS_THREADSAFE
    if (cx->outstandingRequests != 0)
        MOZ_CRASH();
#endif

#if (defined(JSGC_ROOT_ANALYSIS) || defined(JSGC_USE_EXACT_ROOTING)) && defined(DEBUG)
    for (int i = 0; i < THING_ROOT_LIMIT; ++i)
        JS_ASSERT(cx->thingGCRooters[i] == nullptr);
#endif

    if (mode != DCM_NEW_FAILED) {
        if (JSContextCallback cxCallback = rt->cxCallback) {
            /*
             * JSCONTEXT_DESTROY callback is not allowed to fail and must
             * return true.
             */
            JS_ALWAYS_TRUE(cxCallback(cx, JSCONTEXT_DESTROY, rt->cxCallbackData));
        }
    }

    cx->remove();
    bool last = !rt->hasContexts();
    if (last) {
        /*
         * Dump remaining type inference results while we still have a context.
         * This printing depends on atoms still existing.
         */
        for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next())
            c->types.print(cx, false);
    }
    if (mode == DCM_FORCE_GC) {
        JS_ASSERT(!rt->isHeapBusy());
        JS::PrepareForFullGC(rt);
        GC(rt, GC_NORMAL, JS::gcreason::DESTROY_CONTEXT);
    }
    js_delete_poison(cx);
}
void
js::DestroyContext(JSContext* cx, DestroyContextMode mode)
{
    JSRuntime* rt = cx->runtime();
    JS_AbortIfWrongThread(rt);

    if (cx->outstandingRequests != 0)
        MOZ_CRASH("Attempted to destroy a context while it is in a request.");

    cx->roots.checkNoGCRooters();
    FinishPersistentRootedChains(cx->roots);

    if (mode != DCM_NEW_FAILED) {
        if (JSContextCallback cxCallback = rt->cxCallback) {
            /*
             * JSCONTEXT_DESTROY callback is not allowed to fail and must
             * return true.
             */
            JS_ALWAYS_TRUE(cxCallback(cx, JSCONTEXT_DESTROY, rt->cxCallbackData));
        }
    }

    cx->remove();
    bool last = !rt->hasContexts();
    if (last) {
        /*
         * Dump remaining type inference results while we still have a context.
         * This printing depends on atoms still existing.
         */
        for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next())
            PrintTypes(cx, c, false);
    }
    if (mode == DCM_FORCE_GC) {
        MOZ_ASSERT(!rt->isHeapBusy());
        JS::PrepareForFullGC(rt);
        rt->gc.gc(GC_NORMAL, JS::gcreason::DESTROY_CONTEXT);
    }
    js_delete_poison(cx);
}
void
js::DestroyContext(JSContext *cx, DestroyContextMode mode)
{
    JSRuntime *rt = cx->runtime();
    JS_AbortIfWrongThread(rt);

#ifdef JS_THREADSAFE
    if (cx->outstandingRequests != 0)
        MOZ_CRASH();
#endif

#if (defined(JSGC_ROOT_ANALYSIS) || defined(JSGC_USE_EXACT_ROOTING)) && defined(DEBUG)
    for (int i = 0; i < THING_ROOT_LIMIT; ++i)
        JS_ASSERT(cx->thingGCRooters[i] == NULL);
#endif

    if (mode != DCM_NEW_FAILED) {
        if (JSContextCallback cxCallback = rt->cxCallback) {
            /*
             * JSCONTEXT_DESTROY callback is not allowed to fail and must
             * return true.
             */
            JS_ALWAYS_TRUE(cxCallback(cx, JSCONTEXT_DESTROY, rt->cxCallbackData));
        }
    }

    cx->remove();
    bool last = !rt->hasContexts();
    if (last) {
        JS_ASSERT(!rt->isHeapBusy());

        /*
         * Dump remaining type inference results first. This printing
         * depends on atoms still existing.
         */
        for (CompartmentsIter c(rt); !c.done(); c.next())
            c->types.print(cx, false);

        /* Off thread compilation and parsing depend on atoms still existing. */
        for (CompartmentsIter c(rt); !c.done(); c.next())
            CancelOffThreadIonCompile(c, NULL);
        WaitForOffThreadParsingToFinish(rt);

#ifdef JS_WORKER_THREADS
        if (rt->workerThreadState)
            rt->workerThreadState->cleanup(rt);
#endif

        /* Unpin all common names before final GC. */
        FinishCommonNames(rt);

        /* Clear debugging state to remove GC roots. */
        for (CompartmentsIter c(rt); !c.done(); c.next())
            c->clearTraps(rt->defaultFreeOp());
        JS_ClearAllWatchPoints(cx);

        /* Clear the statics table to remove GC roots. */
        rt->staticStrings.finish();

        JS::PrepareForFullGC(rt);
        GC(rt, GC_NORMAL, JS::gcreason::LAST_CONTEXT);

        /*
         * Clear the self-hosted global and delete self-hosted classes *after*
         * GC, as finalizers for objects check for clasp->finalize during GC.
         */
        rt->finishSelfHosting();
    } else if (mode == DCM_FORCE_GC) {
        JS_ASSERT(!rt->isHeapBusy());
        JS::PrepareForFullGC(rt);
        GC(rt, GC_NORMAL, JS::gcreason::DESTROY_CONTEXT);
    }
    js_delete_poison(cx);
}
bool
ForkJoinShared::check(ForkJoinSlice &slice)
{
    JS_ASSERT(cx_->runtime->interrupt);

    if (abort_)
        return false;

    if (slice.isMainThread()) {
        // We are the main thread: therefore we must
        // (1) initiate the rendezvous;
        // (2) if GC was requested, reinvoke trigger
        //     which will do various non-thread-safe
        //     preparatory steps. We then invoke
        //     a non-incremental GC manually.
        // (3) run the operation callback, which
        //     would normally run the GC but
        //     incrementally, which we do not want.
        JSRuntime *rt = cx_->runtime;

        // Calls to js::TriggerGC() should have been redirected to
        // requestGC(), and thus the gcIsNeeded flag is not set yet.
        JS_ASSERT(!rt->gcIsNeeded);

        if (gcRequested_ && rt->isHeapBusy()) {
            // Cannot call GCSlice when heap busy, so abort. Easier
            // right now to abort rather than prove it cannot arise,
            // and safer for short-term than asserting !isHeapBusy.
            setAbortFlag(false);
            records_->setCause(ParallelBailoutHeapBusy, NULL, NULL);
            return false;
        }

        // (1). Initialize the rendezvous and record stack extents.
        AutoRendezvous autoRendezvous(slice);
        AutoMarkWorldStoppedForGC autoMarkSTWFlag(slice);
        slice.recordStackExtent();
        AutoInstallForkJoinStackExtents extents(rt, &stackExtents_[0]);

        // (2). Note that because we are in a STW section, calls to
        // js::TriggerGC() etc will not re-invoke
        // ForkJoinSlice::requestGC().
        triggerGCIfRequested();

        // (2b) Run the GC if it is required. This would occur as
        // part of js_InvokeOperationCallback(), but we want to avoid
        // an incremental GC.
        if (rt->gcIsNeeded) {
            GC(rt, GC_NORMAL, gcReason_);
        }

        // (3). Invoke the callback and abort if it returns false.
        if (!js_InvokeOperationCallback(cx_)) {
            records_->setCause(ParallelBailoutInterrupt, NULL, NULL);
            setAbortFlag(true);
            return false;
        }

        return true;
    } else if (rendezvous_) {
        slice.recordStackExtent();
        joinRendezvous(slice);
    }

    return true;
}