JSCompartment::~JSCompartment()
{
    reportTelemetry();

    // Write the code coverage information in a file.
    JSRuntime* rt = runtimeFromActiveCooperatingThread();
    if (rt->lcovOutput().isEnabled())
        rt->lcovOutput().writeLCovResult(lcovOutput);

    js_delete(jitCompartment_);
    js_delete(scriptCountsMap);
    js_delete(scriptNameMap);
    js_delete(debugScriptMap);
    js_delete(debugEnvs);
    js_delete(objectMetadataTable);
    js_delete(lazyArrayBuffers);
    js_delete(nonSyntacticLexicalEnvironments_);
    js_free(enumerators);

#ifdef DEBUG
    // Avoid assertion destroying the unboxed layouts list if the embedding
    // leaked GC things.
    if (!rt->gc.shutdownCollectedEverything())
        unboxedLayouts.clear();
#endif

    runtime_->numCompartments--;
}
IonBailoutIterator::IonBailoutIterator(const JitActivationIterator &activations,
                                       BailoutStack *bailout)
  : IonFrameIterator(activations),
    machine_(bailout->machine())
{
    uint8_t *sp = bailout->parentStackPointer();
    uint8_t *fp = sp + bailout->frameSize();

    current_ = fp;
    type_ = IonFrame_OptimizedJS;
    topFrameSize_ = current_ - sp;
    topIonScript_ = script()->ionScript();

    if (bailout->frameClass() == FrameSizeClass::None()) {
        snapshotOffset_ = bailout->snapshotOffset();
        return;
    }

    // Compute the snapshot offset from the bailout ID.
    JitActivation *activation = activations.activation()->asJit();
    JSRuntime *rt = activation->compartment()->runtimeFromMainThread();
    JitCode *code = rt->jitRuntime()->getBailoutTable(bailout->frameClass());

    uintptr_t tableOffset = bailout->tableOffset();
    uintptr_t tableStart = reinterpret_cast<uintptr_t>(code->raw());

    JS_ASSERT(tableOffset >= tableStart &&
              tableOffset < tableStart + code->instructionsSize());
    JS_ASSERT((tableOffset - tableStart) % BAILOUT_TABLE_ENTRY_SIZE == 0);

    uint32_t bailoutId = ((tableOffset - tableStart) / BAILOUT_TABLE_ENTRY_SIZE) - 1;
    JS_ASSERT(bailoutId < BAILOUT_TABLE_SIZE);

    snapshotOffset_ = topIonScript_->bailoutToSnapshot(bailoutId);
}
JSContext*
js::NewContext(uint32_t maxBytes, uint32_t maxNurseryBytes, JSRuntime* parentRuntime)
{
    AutoNoteSingleThreadedRegion anstr;

    MOZ_RELEASE_ASSERT(!TlsContext.get());

    JSRuntime* runtime = js_new<JSRuntime>(parentRuntime);
    if (!runtime)
        return nullptr;

    JSContext* cx = js_new<JSContext>(runtime, JS::ContextOptions());
    if (!cx) {
        js_delete(runtime);
        return nullptr;
    }

    if (!runtime->init(cx, maxBytes, maxNurseryBytes)) {
        runtime->destroyRuntime();
        js_delete(cx);
        js_delete(runtime);
        return nullptr;
    }

    if (!cx->init(ContextKind::Cooperative)) {
        runtime->destroyRuntime();
        js_delete(cx);
        js_delete(runtime);
        return nullptr;
    }

    return cx;
}
BailoutFrameInfo::BailoutFrameInfo(const JitActivationIterator &activations,
                                   BailoutStack *bailout)
  : machine_(bailout->machine())
{
    uint8_t *sp = bailout->parentStackPointer();
    framePointer_ = sp + bailout->frameSize();
    topFrameSize_ = framePointer_ - sp;

    JSScript *script = ScriptFromCalleeToken(((JitFrameLayout *) framePointer_)->calleeToken());
    JitActivation *activation = activations.activation()->asJit();
    topIonScript_ = script->ionScript();

    attachOnJitActivation(activations);

    if (bailout->frameClass() == FrameSizeClass::None()) {
        snapshotOffset_ = bailout->snapshotOffset();
        return;
    }

    // Compute the snapshot offset from the bailout ID.
    JSRuntime *rt = activation->compartment()->runtimeFromMainThread();
    JitCode *code = rt->jitRuntime()->getBailoutTable(bailout->frameClass());

    uintptr_t tableOffset = bailout->tableOffset();
    uintptr_t tableStart = reinterpret_cast<uintptr_t>(Assembler::BailoutTableStart(code->raw()));

    MOZ_ASSERT(tableOffset >= tableStart &&
               tableOffset < tableStart + code->instructionsSize());
    MOZ_ASSERT((tableOffset - tableStart) % BAILOUT_TABLE_ENTRY_SIZE == 0);

    uint32_t bailoutId = ((tableOffset - tableStart) / BAILOUT_TABLE_ENTRY_SIZE) - 1;
    MOZ_ASSERT(bailoutId < BAILOUT_TABLE_SIZE);

    snapshotOffset_ = topIonScript_->bailoutToSnapshot(bailoutId);
}
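// Illustrative sketch, not part of the original source: the table lookup
// above recovers a bailout ID from an offset into the bailout table. The
// "- 1" presumably accounts for the recorded offset pointing just past the
// entry that fired (as a return address pushed by a call in that entry
// would). BailoutIdFromOffset and the 8-byte entry size below are
// hypothetical stand-ins used only to check the arithmetic.
#include <cassert>
#include <cstdint>

static uint32_t
BailoutIdFromOffset(uintptr_t tableStart, uintptr_t tableOffset, uintptr_t entrySize)
{
    // The offset must land on an entry boundary inside the table.
    assert(tableOffset >= tableStart && (tableOffset - tableStart) % entrySize == 0);
    return uint32_t((tableOffset - tableStart) / entrySize) - 1;
}

int main()
{
    // With 8-byte entries, an offset 24 bytes into the table corresponds to
    // entry 2 (the third entry), since the offset points past that entry.
    assert(BailoutIdFromOffset(0x1000, 0x1000 + 24, 8) == 2);
    return 0;
}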
template <AllowGC allowGC>
/* static */ void *
GCRuntime::refillFreeListFromMainThread(JSContext *cx, AllocKind thingKind)
{
    JSRuntime *rt = cx->runtime();
    MOZ_ASSERT(!rt->isHeapBusy(), "allocating while under GC");
    MOZ_ASSERT_IF(allowGC, !rt->currentThreadHasExclusiveAccess());

    // Try to allocate; synchronize with background GC threads if necessary.
    void *thing = tryRefillFreeListFromMainThread(cx, thingKind);
    if (MOZ_LIKELY(thing))
        return thing;

    // Perform a last-ditch GC to hopefully free up some memory.
    {
        // If we are doing a fallible allocation, percolate up the OOM
        // instead of reporting it.
        if (!allowGC)
            return nullptr;

        JS::PrepareForFullGC(rt);
        AutoKeepAtoms keepAtoms(cx->perThreadData);
        rt->gc.gc(GC_SHRINK, JS::gcreason::LAST_DITCH);
    }

    // Retry the allocation after the last-ditch GC.
    thing = tryRefillFreeListFromMainThread(cx, thingKind);
    if (thing)
        return thing;

    // We are really just totally out of memory.
    MOZ_ASSERT(allowGC, "A fallible allocation must not report OOM on failure.");
    ReportOutOfMemory(cx);
    return nullptr;
}
SharedArrayRawBuffer*
SharedArrayRawBuffer::New(JSContext* cx, uint32_t length)
{
    // The value (uint32_t)-1 is used as a signal in various places,
    // so guard against it on principle.
    MOZ_ASSERT(length != (uint32_t)-1);

    // Add a page for the header and round to a page boundary.
    uint32_t allocSize = SharedArrayAllocSize(length);
    if (allocSize <= length)
        return nullptr;

    bool preparedForAsmJS = jit::JitOptions.asmJSAtomicsEnable && IsValidAsmJSHeapLength(length);

    void* p = nullptr;
    if (preparedForAsmJS) {
        // Test >= to guard against the case where multiple extant runtimes
        // race to allocate.
        if (++numLive >= maxLive) {
            JSRuntime* rt = cx->runtime();
            if (rt->largeAllocationFailureCallback)
                rt->largeAllocationFailureCallback(rt->largeAllocationFailureCallbackData);
            if (numLive >= maxLive) {
                numLive--;
                return nullptr;
            }
        }

        uint32_t mappedSize = SharedArrayMappedSize(allocSize);

        // Get the entire reserved region (with all pages inaccessible)
        p = MapMemory(mappedSize, false);
        if (!p) {
            numLive--;
            return nullptr;
        }

        if (!MarkValidRegion(p, allocSize)) {
            UnmapMemory(p, mappedSize);
            numLive--;
            return nullptr;
        }

# if defined(MOZ_VALGRIND) && defined(VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE)
        // Tell Valgrind/Memcheck to not report accesses in the inaccessible region.
        VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE((unsigned char*)p + allocSize,
                                                       mappedSize - allocSize);
# endif
    } else {
        p = MapMemory(allocSize, true);
        if (!p)
            return nullptr;
    }

    uint8_t* buffer = reinterpret_cast<uint8_t*>(p) + gc::SystemPageSize();
    uint8_t* base = buffer - sizeof(SharedArrayRawBuffer);
    SharedArrayRawBuffer* rawbuf = new (base) SharedArrayRawBuffer(buffer, length, preparedForAsmJS);
    MOZ_ASSERT(rawbuf->length == length); // Deallocation needs this
    return rawbuf;
}
void
js::IterateGrayObjects(Zone* zone, GCThingCallback cellCallback, void* data)
{
    JSRuntime* rt = zone->runtimeFromMainThread();
    MOZ_ASSERT(!rt->isHeapBusy());
    AutoPrepareForTracing prep(rt->contextFromMainThread(), SkipAtoms);
    ::IterateGrayObjects(zone, cellCallback, data);
}
void
js::DestroyContext(JSContext *cx, DestroyContextMode mode)
{
    JSRuntime *rt = cx->runtime;
    JS_AbortIfWrongThread(rt);

    JS_ASSERT(!cx->enumerators);

#ifdef JS_THREADSAFE
    JS_ASSERT(cx->outstandingRequests == 0);
#endif

    if (mode != DCM_NEW_FAILED) {
        if (JSContextCallback cxCallback = rt->cxCallback) {
            /*
             * JSCONTEXT_DESTROY callback is not allowed to fail and must
             * return true.
             */
            JS_ALWAYS_TRUE(cxCallback(cx, JSCONTEXT_DESTROY));
        }
    }

    cx->remove();
    bool last = !rt->hasContexts();
    if (last) {
        JS_ASSERT(!rt->isHeapBusy());

        /*
         * Dump remaining type inference results first. This printing
         * depends on atoms still existing.
         */
        for (CompartmentsIter c(rt); !c.done(); c.next())
            c->types.print(cx, false);

        /* Off thread ion compilations depend on atoms still existing. */
        for (CompartmentsIter c(rt); !c.done(); c.next())
            CancelOffThreadIonCompile(c, NULL);

        /* Unpin all common names before final GC. */
        FinishCommonNames(rt);

        /* Clear debugging state to remove GC roots. */
        for (CompartmentsIter c(rt); !c.done(); c.next())
            c->clearTraps(rt->defaultFreeOp());
        JS_ClearAllWatchPoints(cx);

        /* Clear the statics table to remove GC roots. */
        rt->staticStrings.finish();

        PrepareForFullGC(rt);
        GC(rt, GC_NORMAL, gcreason::LAST_CONTEXT);
    } else if (mode == DCM_FORCE_GC) {
        JS_ASSERT(!rt->isHeapBusy());
        PrepareForFullGC(rt);
        GC(rt, GC_NORMAL, gcreason::DESTROY_CONTEXT);
    }

    js_delete(cx);
}
void
WorkerThreadState::finishParseTaskForScript(JSScript *script)
{
    JSRuntime *rt = script->compartment()->runtimeFromMainThread();

    ParseTask *parseTask = NULL;

    {
        AutoLockWorkerThreadState lock(*rt->workerThreadState);
        for (size_t i = 0; i < parseFinishedList.length(); i++) {
            if (parseFinishedList[i]->script == script) {
                parseTask = parseFinishedList[i];
                parseFinishedList[i] = parseFinishedList.back();
                parseFinishedList.popBack();
                break;
            }
        }
    }
    JS_ASSERT(parseTask);

    // Mark the zone as no longer in use by an ExclusiveContext, and available
    // to be collected by the GC.
    rt->clearUsedByExclusiveThread(parseTask->zone);

    if (!script) {
        // Parsing failed and there is nothing to finish, but there still may
        // be lingering ParseTask instances holding roots which need to be
        // cleaned up. The ParseTask which we picked might not be the right
        // one but this is ok as finish calls will be 1:1 with calls that
        // create a ParseTask.
        js_delete(parseTask);
        return;
    }

    // Point the prototypes of any objects in the script's compartment to refer
    // to the corresponding prototype in the new compartment. This will briefly
    // create cross compartment pointers, which will be fixed by the
    // MergeCompartments call below.
    for (gc::CellIter iter(parseTask->zone, gc::FINALIZE_TYPE_OBJECT);
         !iter.done();
         iter.next())
    {
        types::TypeObject *object = iter.get<types::TypeObject>();
        JSObject *proto = object->proto;
        if (!proto)
            continue;

        JSProtoKey key = js_IdentifyClassPrototype(proto);
        if (key == JSProto_Null)
            continue;

        JSObject *newProto = GetClassPrototypePure(&parseTask->scopeChain->global(), key);
        JS_ASSERT(newProto);

        object->proto = newProto;
    }

    // Move the parsed script and all its contents into the desired compartment.
    gc::MergeCompartments(parseTask->script->compartment(), parseTask->scopeChain->compartment());

    js_delete(parseTask);
}
void
js::TraceRuntime(JSTracer* trc)
{
    MOZ_ASSERT(!trc->isMarkingTracer());

    JSRuntime* rt = trc->runtime();
    rt->gc.evictNursery();
    AutoPrepareForTracing prep(rt->contextFromMainThread(), WithAtoms);
    gcstats::AutoPhase ap(rt->gc.stats, gcstats::PHASE_TRACE_HEAP);
    rt->gc.markRuntime(trc, GCRuntime::TraceRuntime, prep.session().lock);
}
void
js::IterateGrayObjects(Zone* zone, GCThingCallback cellCallback, void* data)
{
    JSRuntime* rt = zone->runtimeFromMainThread();
    AutoEmptyNursery empty(rt);
    AutoPrepareForTracing prep(rt->contextFromMainThread(), SkipAtoms);

    for (auto thingKind : ObjectAllocKinds()) {
        for (auto obj = zone->cellIter<JSObject>(thingKind, empty); !obj.done(); obj.next()) {
            if (obj->asTenured().isMarked(GRAY))
                cellCallback(data, JS::GCCellPtr(obj.get()));
        }
    }
}
void
js::MarkAtoms(JSTracer* trc)
{
    JSRuntime* rt = trc->runtime();
    for (AtomSet::Enum e(rt->atoms()); !e.empty(); e.popFront()) {
        const AtomStateEntry& entry = e.front();
        if (!entry.isPinned())
            continue;

        JSAtom* atom = entry.asPtrUnbarriered();
        TraceRoot(trc, &atom, "interned_atom");
        MOZ_ASSERT(entry.asPtrUnbarriered() == atom);
    }
}
SharedArrayRawBuffer*
SharedArrayRawBuffer::New(JSContext* cx, uint32_t length)
{
    // The value (uint32_t)-1 is used as a signal in various places,
    // so guard against it on principle.
    MOZ_ASSERT(length != (uint32_t)-1);

    // Add a page for the header and round to a page boundary.
    uint32_t allocSize = (length + 2*AsmJSPageSize - 1) & ~(AsmJSPageSize - 1);
    if (allocSize <= length)
        return nullptr;

#if defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
    // Test >= to guard against the case where multiple extant runtimes
    // race to allocate.
    if (++numLive >= maxLive) {
        JSRuntime* rt = cx->runtime();
        if (rt->largeAllocationFailureCallback)
            rt->largeAllocationFailureCallback(rt->largeAllocationFailureCallbackData);
        if (numLive >= maxLive) {
            numLive--;
            return nullptr;
        }
    }

    // Get the entire reserved region (with all pages inaccessible)
    void* p = MapMemory(SharedArrayMappedSize, false);
    if (!p) {
        numLive--;
        return nullptr;
    }

    if (!MarkValidRegion(p, allocSize)) {
        UnmapMemory(p, SharedArrayMappedSize);
        numLive--;
        return nullptr;
    }

# if defined(MOZ_VALGRIND) && defined(VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE)
    // Tell Valgrind/Memcheck to not report accesses in the inaccessible region.
    VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE((unsigned char*)p + allocSize,
                                                   SharedArrayMappedSize - allocSize);
# endif
#else
    void* p = MapMemory(allocSize, true);
    if (!p)
        return nullptr;
#endif

    uint8_t* buffer = reinterpret_cast<uint8_t*>(p) + AsmJSPageSize;
    uint8_t* base = buffer - sizeof(SharedArrayRawBuffer);
    return new (base) SharedArrayRawBuffer(buffer, length);
}
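// Illustrative sketch, not from the original source: the allocSize expression
// above reserves one extra page for the SharedArrayRawBuffer header and rounds
// the total up to a page boundary, assuming AsmJSPageSize is a power of two.
// PageSize and the sample values below are hypothetical, used only to check
// the arithmetic in a standalone program.
#include <cassert>
#include <cstdint>

int main()
{
    const uint32_t PageSize = 4096; // stand-in for AsmJSPageSize
    auto allocSize = [&](uint32_t length) {
        return (length + 2 * PageSize - 1) & ~(PageSize - 1);
    };

    // One byte of user data still needs a header page plus one data page.
    assert(allocSize(1) == 2 * PageSize);
    // An exact multiple of the page size gets exactly one extra header page.
    assert(allocSize(4 * PageSize) == 5 * PageSize);
    // Overflow shows up as allocSize <= length, which the caller rejects.
    assert(allocSize(UINT32_MAX - PageSize) <= UINT32_MAX - PageSize);
    return 0;
}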
void
js::MarkAtoms(JSTracer *trc)
{
    JSRuntime *rt = trc->runtime;
    for (AtomSet::Enum e(rt->atoms()); !e.empty(); e.popFront()) {
        const AtomStateEntry &entry = e.front();
        if (!entry.isTagged())
            continue;

        JSAtom *atom = entry.asPtr();
        bool tagged = entry.isTagged();
        MarkStringRoot(trc, &atom, "interned_atom");
        if (entry.asPtr() != atom)
            e.rekeyFront(AtomHasher::Lookup(atom), AtomStateEntry(atom, tagged));
    }
}
void
CheckHeapTracer::check(AutoLockForExclusiveAccess& lock)
{
    // The analysis thinks that traceRuntime might GC by calling a GC callback.
    JS::AutoSuppressGCAnalysis nogc;
    if (!rt->isBeingDestroyed())
        rt->gc.traceRuntime(this, lock);

    while (!stack.empty()) {
        WorkItem item = stack.back();
        if (item.processed) {
            stack.popBack();
        } else {
            parentIndex = stack.length() - 1;
            TraceChildren(this, item.thing);
            stack.back().processed = true;
        }
    }

    if (oom)
        return;

    if (failures) {
        fprintf(stderr, "Heap check: %zu failure(s) out of %" PRIu32 " pointers checked\n",
                failures, visited.count());
    }
    MOZ_RELEASE_ASSERT(failures == 0);
}
JSContext*
js::NewCooperativeContext(JSContext* siblingContext)
{
    MOZ_RELEASE_ASSERT(!TlsContext.get());

    JSRuntime* runtime = siblingContext->runtime();
    JSContext* cx = js_new<JSContext>(runtime, JS::ContextOptions());
    if (!cx || !cx->init(ContextKind::Cooperative)) {
        js_delete(cx);
        return nullptr;
    }

    runtime->setNewbornActiveContext(cx);
    return cx;
}
MinorCollectionTracer(JSRuntime *rt, Nursery *nursery)
  : JSTracer(),
    nursery(nursery),
    runtime(rt),
    session(runtime, MinorCollecting),
    head(NULL),
    tail(&head),
    savedNeedsBarrier(runtime->needsBarrier()),
    disableStrictProxyChecking(runtime)
{
    JS_TracerInit(this, runtime, Nursery::MinorGCCallback);
    eagerlyTraceWeakMaps = TraceWeakMapKeysValues;

    runtime->gcNumber++;
    runtime->setNeedsBarrier(false);
    for (ZonesIter zone(rt); !zone.done(); zone.next())
        zone->saveNeedsBarrier(false);
}
/* static */ void*
GCRuntime::refillFreeListOffMainThread(ExclusiveContext* cx, AllocKind thingKind)
{
    ArenaLists* arenas = cx->arenas();
    Zone* zone = cx->zone();
    JSRuntime* rt = zone->runtimeFromAnyThread();

    AutoMaybeStartBackgroundAllocation maybeStartBGAlloc;

    // If we're off the main thread, we try to allocate once and return
    // whatever value we get. We need to first ensure the main thread is not in
    // a GC session.
    AutoLockHelperThreadState lock;
    while (rt->isHeapBusy())
        HelperThreadState().wait(GlobalHelperThreadState::PRODUCER);

    return arenas->allocateFromArena(zone, thingKind, maybeStartBGAlloc);
}
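// Illustrative sketch, not part of the original source: the wait loop above
// is the classic "block on a condition variable until a predicate clears"
// pattern. A generic standalone version using only the C++ standard library,
// with hypothetical names (GCGate, heapBusy), might look like this:
#include <condition_variable>
#include <mutex>

struct GCGate
{
    std::mutex mutex;
    std::condition_variable cv;
    bool heapBusy = false;

    // Block the calling (helper) thread while a collection is in progress.
    void waitUntilIdle()
    {
        std::unique_lock<std::mutex> lock(mutex);
        // The predicate is re-checked after every wakeup, so spurious wakeups
        // and races with newly started collections are handled correctly.
        cv.wait(lock, [this] { return !heapBusy; });
    }

    // Called by the collecting thread once the heap becomes idle again.
    void setIdle()
    {
        {
            std::lock_guard<std::mutex> lock(mutex);
            heapBusy = false;
        }
        cv.notify_all();
    }
};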
void
js::DestroyContext(JSContext *cx, DestroyContextMode mode)
{
    JSRuntime *rt = cx->runtime();
    JS_AbortIfWrongThread(rt);

#ifdef JS_THREADSAFE
    if (cx->outstandingRequests != 0)
        MOZ_CRASH();
#endif

#if (defined(JSGC_ROOT_ANALYSIS) || defined(JSGC_USE_EXACT_ROOTING)) && defined(DEBUG)
    for (int i = 0; i < THING_ROOT_LIMIT; ++i)
        JS_ASSERT(cx->thingGCRooters[i] == nullptr);
#endif

    if (mode != DCM_NEW_FAILED) {
        if (JSContextCallback cxCallback = rt->cxCallback) {
            /*
             * JSCONTEXT_DESTROY callback is not allowed to fail and must
             * return true.
             */
            JS_ALWAYS_TRUE(cxCallback(cx, JSCONTEXT_DESTROY, rt->cxCallbackData));
        }
    }

    cx->remove();
    bool last = !rt->hasContexts();
    if (last) {
        /*
         * Dump remaining type inference results while we still have a context.
         * This printing depends on atoms still existing.
         */
        for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next())
            c->types.print(cx, false);
    }
    if (mode == DCM_FORCE_GC) {
        JS_ASSERT(!rt->isHeapBusy());
        JS::PrepareForFullGC(rt);
        GC(rt, GC_NORMAL, JS::gcreason::DESTROY_CONTEXT);
    }
    js_delete_poison(cx);
}
uint32_t
Table::grow(uint32_t delta, JSContext* cx)
{
    // This isn't just an optimization: movingGrowable() assumes that
    // onMovingGrowTable does not fire when length == maximum.
    if (!delta)
        return length_;

    uint32_t oldLength = length_;

    CheckedInt<uint32_t> newLength = oldLength;
    newLength += delta;
    if (!newLength.isValid() || newLength.value() > MaxTableLength)
        return -1;

    if (maximum_ && newLength.value() > maximum_.value())
        return -1;

    MOZ_ASSERT(movingGrowable());

    JSRuntime* rt = cx;  // Use JSRuntime's MallocProvider to avoid throwing.

    // Note that realloc does not release array_'s pointee (which is returned by
    // externalArray()) on failure which is exactly what we need here.
    ExternalTableElem* newArray = rt->pod_realloc(externalArray(), length_, newLength.value());
    if (!newArray)
        return -1;
    Unused << array_.release();
    array_.reset((uint8_t*)newArray);

    // Realloc does not zero the delta for us.
    PodZero(newArray + length_, delta);

    length_ = newLength.value();

    if (observers_.initialized()) {
        for (InstanceSet::Range r = observers_.all(); !r.empty(); r.popFront())
            r.front()->instance().onMovingGrowTable();
    }

    return oldLength;
}
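// Illustrative sketch, not from the original source: the CheckedInt<uint32_t>
// above guards the length arithmetic against wraparound before the table is
// reallocated. A standalone equivalent using only the standard library, with
// a hypothetical CheckedGrow helper and MaxTableLength value, behaves the
// same way (overflow or exceeding the cap maps to the "-1" failure path):
#include <cassert>
#include <cstdint>
#include <optional>

static std::optional<uint32_t>
CheckedGrow(uint32_t oldLength, uint32_t delta, uint32_t maxLength)
{
    // Detect uint32_t overflow: a wrapped sum is smaller than either operand.
    uint32_t newLength = oldLength + delta;
    if (newLength < oldLength || newLength > maxLength)
        return std::nullopt; // corresponds to Table::grow returning -1
    return newLength;
}

int main()
{
    const uint32_t MaxTableLength = 10 * 1000 * 1000; // hypothetical limit
    assert(CheckedGrow(100, 28, MaxTableLength) == std::optional<uint32_t>(128));
    assert(!CheckedGrow(UINT32_MAX, 1, MaxTableLength));      // wraps around
    assert(!CheckedGrow(MaxTableLength, 1, MaxTableLength));  // exceeds the cap
    return 0;
}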
void
js::DestroyContext(JSContext* cx, DestroyContextMode mode)
{
    JSRuntime* rt = cx->runtime();
    JS_AbortIfWrongThread(rt);

    if (cx->outstandingRequests != 0)
        MOZ_CRASH("Attempted to destroy a context while it is in a request.");

    cx->roots.checkNoGCRooters();
    FinishPersistentRootedChains(cx->roots);

    if (mode != DCM_NEW_FAILED) {
        if (JSContextCallback cxCallback = rt->cxCallback) {
            /*
             * JSCONTEXT_DESTROY callback is not allowed to fail and must
             * return true.
             */
            JS_ALWAYS_TRUE(cxCallback(cx, JSCONTEXT_DESTROY, rt->cxCallbackData));
        }
    }

    cx->remove();
    bool last = !rt->hasContexts();
    if (last) {
        /*
         * Dump remaining type inference results while we still have a context.
         * This printing depends on atoms still existing.
         */
        for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next())
            PrintTypes(cx, c, false);
    }
    if (mode == DCM_FORCE_GC) {
        MOZ_ASSERT(!rt->isHeapBusy());
        JS::PrepareForFullGC(rt);
        rt->gc.gc(GC_NORMAL, JS::gcreason::DESTROY_CONTEXT);
    }
    js_delete_poison(cx);
}
IonBailoutIterator::IonBailoutIterator(const JitActivationIterator &activations,
                                       BailoutStack *bailout)
  : JitFrameIterator(activations),
    machine_(bailout->machine())
{
    uint8_t *sp = bailout->parentStackPointer();
    uint8_t *fp = sp + bailout->frameSize();

    kind_ = Kind_BailoutIterator;
    current_ = fp;
    type_ = JitFrame_IonJS;
    topFrameSize_ = current_ - sp;
    switch (mode_) {
      case SequentialExecution:
        topIonScript_ = script()->ionScript();
        break;
      case ParallelExecution:
        topIonScript_ = script()->parallelIonScript();
        break;
      default:
        MOZ_ASSUME_UNREACHABLE("No such execution mode");
    }

    if (bailout->frameClass() == FrameSizeClass::None()) {
        snapshotOffset_ = bailout->snapshotOffset();
        return;
    }

    // Compute the snapshot offset from the bailout ID.
    JitActivation *activation = activations.activation()->asJit();
    JSRuntime *rt = activation->compartment()->runtimeFromMainThread();
    JitCode *code = rt->jitRuntime()->getBailoutTable(bailout->frameClass());

    uintptr_t tableOffset = bailout->tableOffset();
    uintptr_t tableStart = reinterpret_cast<uintptr_t>(Assembler::BailoutTableStart(code->raw()));

    JS_ASSERT(tableOffset >= tableStart &&
              tableOffset < tableStart + code->instructionsSize());
    JS_ASSERT((tableOffset - tableStart) % BAILOUT_TABLE_ENTRY_SIZE == 0);

    uint32_t bailoutId = ((tableOffset - tableStart) / BAILOUT_TABLE_ENTRY_SIZE) - 1;
    JS_ASSERT(bailoutId < BAILOUT_TABLE_SIZE);

    snapshotOffset_ = topIonScript_->bailoutToSnapshot(bailoutId);
}
JSContext*
js::NewContext(uint32_t maxBytes, uint32_t maxNurseryBytes, JSRuntime* parentRuntime)
{
    AutoNoteSingleThreadedRegion anstr;

    MOZ_RELEASE_ASSERT(!TlsContext.get());

#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
    js::oom::SetThreadType(!parentRuntime ? js::THREAD_TYPE_COOPERATING : js::THREAD_TYPE_WORKER);
#endif

    JSRuntime* runtime = js_new<JSRuntime>(parentRuntime);
    if (!runtime)
        return nullptr;

    JSContext* cx = js_new<JSContext>(runtime, JS::ContextOptions());
    if (!cx) {
        js_delete(runtime);
        return nullptr;
    }

    if (!runtime->init(cx, maxBytes, maxNurseryBytes)) {
        runtime->destroyRuntime();
        js_delete(cx);
        js_delete(runtime);
        return nullptr;
    }

    if (!cx->init(ContextKind::Cooperative)) {
        runtime->destroyRuntime();
        js_delete(cx);
        js_delete(runtime);
        return nullptr;
    }

    return cx;
}
void
HelperThread::handleIonWorkload()
{
    MOZ_ASSERT(HelperThreadState().isLocked());
    MOZ_ASSERT(HelperThreadState().canStartIonCompile());
    MOZ_ASSERT(idle());

    // Find the IonBuilder in the worklist with the highest priority, and
    // remove it from the worklist.
    jit::IonBuilder* builder =
        HelperThreadState().highestPriorityPendingIonCompile(/* remove = */ true);

    // If there are now too many threads with active IonBuilders, indicate to
    // the one with the lowest priority that it should pause. Note that due to
    // builder priorities changing since pendingIonCompileHasSufficientPriority
    // was called, the builder we are pausing may actually be higher priority
    // than the one we are about to start. Oh well.
    if (HelperThread* other = HelperThreadState().lowestPriorityUnpausedIonCompileAtThreshold()) {
        MOZ_ASSERT(other->ionBuilder() && !other->pause);
        other->pause = true;
    }

    currentTask.emplace(builder);
    builder->setPauseFlag(&pause);

    TraceLoggerThread* logger = TraceLoggerForCurrentThread();
    TraceLoggerEvent event(logger, TraceLogger_AnnotateScripts, builder->script());
    AutoTraceLog logScript(logger, event);
    AutoTraceLog logCompile(logger, TraceLogger_IonCompilation);

    JSRuntime* rt = builder->script()->compartment()->runtimeFromAnyThread();

    {
        AutoUnlockHelperThreadState unlock;
        PerThreadData::AutoEnterRuntime enter(threadData.ptr(),
                                              builder->script()->runtimeFromAnyThread());
        jit::JitContext jctx(jit::CompileRuntime::get(rt),
                             jit::CompileCompartment::get(builder->script()->compartment()),
                             &builder->alloc());
        builder->setBackgroundCodegen(jit::CompileBackEnd(builder));
    }

    FinishOffThreadIonCompile(builder);
    currentTask.reset();
    pause = false;

    // Ping the main thread so that the compiled code can be incorporated
    // at the next interrupt callback. Don't interrupt Ion code for this, as
    // this incorporation can be delayed indefinitely without affecting
    // performance as long as the main thread is actually executing Ion code.
    rt->requestInterrupt(JSRuntime::RequestInterruptCanWait);

    // Notify the main thread in case it is waiting for the compilation to finish.
    HelperThreadState().notifyAll(GlobalHelperThreadState::CONSUMER);

    // When finishing Ion compilation jobs, we can start unpausing compilation
    // threads that were paused to restrict the number of active compilations.
    // Only unpause one at a time, to make sure we don't exceed the restriction.
    // Since threads are currently only paused for Ion compilations, this
    // strategy will eventually unpause all paused threads, regardless of how
    // many there are, since each thread we unpause will eventually finish and
    // end up back here.
    if (HelperThread* other = HelperThreadState().highestPriorityPausedIonCompile()) {
        MOZ_ASSERT(other->ionBuilder() && other->pause);

        // Only unpause the other thread if there isn't a higher priority
        // builder which this thread or another can start on.
        jit::IonBuilder* builder = HelperThreadState().highestPriorityPendingIonCompile();
        if (!builder || IonBuilderHasHigherPriority(other->ionBuilder(), builder)) {
            other->pause = false;

            // Notify all paused threads, to make sure the one we just
            // unpaused wakes up.
            HelperThreadState().notifyAll(GlobalHelperThreadState::PAUSE);
        }
    }
}
// Be very cautious and default to not handling; we don't want to accidentally
// silence real crashes from real bugs.
static bool
HandleSignal(int signum, siginfo_t *info, void *ctx)
{
    CONTEXT *context = (CONTEXT *)ctx;
    uint8_t **ppc = ContextToPC(context);
    uint8_t *pc = *ppc;
    void *faultingAddress = info->si_addr;

    JSRuntime *rt = RuntimeForCurrentThread();

    // Don't allow recursive handling of signals, see AutoSetHandlingSignal.
    if (!rt || rt->handlingSignal)
        return false;
    AutoSetHandlingSignal handling(rt);

    if (rt->jitRuntime() && rt->jitRuntime()->handleAccessViolation(rt, faultingAddress))
        return true;

    AsmJSActivation *activation = InnermostAsmJSActivation();
    if (!activation)
        return false;

    const AsmJSModule &module = activation->module();

    if (HandleSimulatorInterrupt(rt, activation, faultingAddress)) {
        JSRuntime::AutoLockForInterrupt lock(rt);
        module.unprotectCode(rt);
        return true;
    }

    if (!module.containsPC(pc))
        return false;

    // If we faulted trying to execute code in 'module', this must be an
    // interrupt callback (see RequestInterruptForAsmJSCode). Redirect
    // execution to a trampoline which will call js::HandleExecutionInterrupt.
    // The trampoline will jump to activation->resumePC if execution isn't
    // interrupted.
    if (module.containsPC(faultingAddress)) {
        activation->setInterrupted(pc);
        *ppc = module.interruptExit();

        JSRuntime::AutoLockForInterrupt lock(rt);
        module.unprotectCode(rt);
        return true;
    }

# if defined(JS_CODEGEN_X64)
    // These checks aren't necessary, but, since we can, check anyway to make
    // sure we aren't covering up a real bug.
    if (!module.maybeHeap() ||
        faultingAddress < module.maybeHeap() ||
        faultingAddress >= module.maybeHeap() + AsmJSBufferProtectedSize)
    {
        return false;
    }

    const AsmJSHeapAccess *heapAccess = module.lookupHeapAccess(pc);
    if (!heapAccess)
        return false;

    // We now know that this is an out-of-bounds access made by an asm.js
    // load/store that we should handle. If this is a load, assign the
    // JS-defined result value to the destination register (ToInt32(undefined)
    // or ToNumber(undefined), determined by the type of the destination
    // register) and set the PC to the next op. Upon return from the handler,
    // execution will resume at this next PC.
    if (heapAccess->isLoad())
        SetRegisterToCoercedUndefined(context, heapAccess->isFloat32Load(), heapAccess->loadedReg());
    *ppc += heapAccess->opLength();
    return true;
# else
    return false;
# endif
}
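// Illustrative sketch, not the original installation code: a handler with the
// (signum, siginfo_t*, void*) shape above is typically registered with POSIX
// sigaction() and SA_SIGINFO so that the faulting address arrives in si_addr.
// SegvHandler and the trivial body below are hypothetical; a real embedding
// would instead decide whether the fault belongs to JIT-generated code.
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

static void
SegvHandler(int signum, siginfo_t* info, void* /*context*/)
{
    // fprintf is not async-signal-safe; acceptable for a demonstration only.
    fprintf(stderr, "SIGSEGV at %p\n", info->si_addr);
    abort();
}

int main()
{
    struct sigaction sa = {};
    sa.sa_flags = SA_SIGINFO;       // deliver a siginfo_t to the handler
    sa.sa_sigaction = SegvHandler;  // three-argument handler form
    sigemptyset(&sa.sa_mask);
    if (sigaction(SIGSEGV, &sa, nullptr) != 0) {
        perror("sigaction");
        return 1;
    }
    // Any SIGSEGV raised from here on is routed through SegvHandler.
    return 0;
}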
void
JSCompartment::sweep(FreeOp *fop, bool releaseTypes)
{
    JS_ASSERT(!activeAnalysis);

    JSRuntime *rt = runtimeFromMainThread();

    {
        gcstats::MaybeAutoPhase ap(rt->gc.stats, !rt->isHeapCompacting(),
                                   gcstats::PHASE_SWEEP_TABLES_INNER_VIEWS);
        innerViews.sweep(rt);
    }

    {
        gcstats::MaybeAutoPhase ap(rt->gc.stats, !rt->isHeapCompacting(),
                                   gcstats::PHASE_SWEEP_TABLES_WRAPPER);
        sweepCrossCompartmentWrappers();
    }

    /* Remove dead references held weakly by the compartment. */

    sweepBaseShapeTable();
    sweepInitialShapeTable();
    {
        gcstats::MaybeAutoPhase ap(rt->gc.stats, !rt->isHeapCompacting(),
                                   gcstats::PHASE_SWEEP_TABLES_TYPE_OBJECT);
        sweepNewTypeObjectTable(newTypeObjects);
        sweepNewTypeObjectTable(lazyTypeObjects);
    }
    sweepCallsiteClones();
    savedStacks_.sweep(rt);

    if (global_ && IsObjectAboutToBeFinalized(global_.unsafeGet())) {
        if (debugMode())
            Debugger::detachAllDebuggersFromGlobal(fop, global_);
        global_.set(nullptr);
    }

    if (selfHostingScriptSource &&
        IsObjectAboutToBeFinalized((JSObject **) selfHostingScriptSource.unsafeGet()))
    {
        selfHostingScriptSource.set(nullptr);
    }

    if (jitCompartment_)
        jitCompartment_->sweep(fop, this);

    /*
     * JIT code increments activeWarmUpCounter for any RegExpShared used by jit
     * code for the lifetime of the JIT script. Thus, we must perform
     * sweeping after clearing jit code.
     */
    regExps.sweep(rt);

    if (debugScopes)
        debugScopes->sweep(rt);

    /* Finalize unreachable (key,value) pairs in all weak maps. */
    WeakMapBase::sweepCompartment(this);

    /* Sweep list of native iterators. */
    NativeIterator *ni = enumerators->next();
    while (ni != enumerators) {
        JSObject *iterObj = ni->iterObj();
        NativeIterator *next = ni->next();
        if (gc::IsObjectAboutToBeFinalized(&iterObj))
            ni->unlink();
        ni = next;
    }
}
~MinorCollectionTracer()
{
    runtime->setNeedsBarrier(savedNeedsBarrier);
    for (ZonesIter zone(runtime); !zone.done(); zone.next())
        zone->restoreNeedsBarrier();
}
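// Illustrative sketch, not from the original source: the MinorCollectionTracer
// constructor earlier saves runtime->needsBarrier() and clears it, and this
// destructor restores it. The same save-in-constructor / restore-in-destructor
// idea, reduced to a generic standalone RAII guard (AutoRestore is a
// hypothetical name used only for this example):
#include <cassert>

template <typename T>
class AutoRestore
{
    T& slot_;
    T saved_;

  public:
    // Remember the current value and install a temporary one.
    AutoRestore(T& slot, T temporary)
      : slot_(slot), saved_(slot)
    {
        slot_ = temporary;
    }

    // Put the original value back, even on early returns or exceptions.
    ~AutoRestore() { slot_ = saved_; }
};

int main()
{
    bool needsBarrier = true;
    {
        AutoRestore<bool> noBarrier(needsBarrier, false); // cleared for the collection
        assert(!needsBarrier);
    }
    assert(needsBarrier); // restored when the guard goes out of scope
    return 0;
}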
void
js_DestroyContext(JSContext *cx, JSDestroyContextMode mode)
{
    JSRuntime *rt = cx->runtime;
    JS_AbortIfWrongThread(rt);

    JS_ASSERT(!cx->enumerators);

#ifdef JS_THREADSAFE
    JS_ASSERT(cx->outstandingRequests == 0);
#endif

    if (mode != JSDCM_NEW_FAILED) {
        if (JSContextCallback cxCallback = rt->cxCallback) {
            /*
             * JSCONTEXT_DESTROY callback is not allowed to fail and must
             * return true.
             */
            DebugOnly<JSBool> callbackStatus = cxCallback(cx, JSCONTEXT_DESTROY);
            JS_ASSERT(callbackStatus);
        }
    }

    JS_LOCK_GC(rt);
    JS_REMOVE_LINK(&cx->link);
    bool last = !rt->hasContexts();
    if (last || mode == JSDCM_FORCE_GC || mode == JSDCM_MAYBE_GC) {
        JS_ASSERT(!rt->gcRunning);

#ifdef JS_THREADSAFE
        rt->gcHelperThread.waitBackgroundSweepEnd();
#endif
        JS_UNLOCK_GC(rt);

        if (last) {
            /*
             * Dump remaining type inference results first. This printing
             * depends on atoms still existing.
             */
            {
                AutoLockGC lock(rt);
                for (CompartmentsIter c(rt); !c.done(); c.next())
                    c->types.print(cx, false);
            }

            /* Unpin all common atoms before final GC. */
            js_FinishCommonAtoms(cx);

            /* Clear debugging state to remove GC roots. */
            for (CompartmentsIter c(rt); !c.done(); c.next())
                c->clearTraps(cx);
            JS_ClearAllWatchPoints(cx);

            js_GC(cx, NULL, GC_NORMAL, gcreason::LAST_CONTEXT);
        } else if (mode == JSDCM_FORCE_GC) {
            js_GC(cx, NULL, GC_NORMAL, gcreason::DESTROY_CONTEXT);
        } else if (mode == JSDCM_MAYBE_GC) {
            JS_MaybeGC(cx);
        }
        JS_LOCK_GC(rt);
    }
#ifdef JS_THREADSAFE
    rt->gcHelperThread.waitBackgroundSweepEnd();
#endif
    JS_UNLOCK_GC(rt);
    Foreground::delete_(cx);
}
void
js::DestroyContext(JSContext *cx, DestroyContextMode mode)
{
    JSRuntime *rt = cx->runtime();
    JS_AbortIfWrongThread(rt);

#ifdef JS_THREADSAFE
    if (cx->outstandingRequests != 0)
        MOZ_CRASH();
#endif

#if (defined(JSGC_ROOT_ANALYSIS) || defined(JSGC_USE_EXACT_ROOTING)) && defined(DEBUG)
    for (int i = 0; i < THING_ROOT_LIMIT; ++i)
        JS_ASSERT(cx->thingGCRooters[i] == NULL);
#endif

    if (mode != DCM_NEW_FAILED) {
        if (JSContextCallback cxCallback = rt->cxCallback) {
            /*
             * JSCONTEXT_DESTROY callback is not allowed to fail and must
             * return true.
             */
            JS_ALWAYS_TRUE(cxCallback(cx, JSCONTEXT_DESTROY, rt->cxCallbackData));
        }
    }

    cx->remove();
    bool last = !rt->hasContexts();
    if (last) {
        JS_ASSERT(!rt->isHeapBusy());

        /*
         * Dump remaining type inference results first. This printing
         * depends on atoms still existing.
         */
        for (CompartmentsIter c(rt); !c.done(); c.next())
            c->types.print(cx, false);

        /* Off thread compilation and parsing depend on atoms still existing. */
        for (CompartmentsIter c(rt); !c.done(); c.next())
            CancelOffThreadIonCompile(c, NULL);
        WaitForOffThreadParsingToFinish(rt);

#ifdef JS_WORKER_THREADS
        if (rt->workerThreadState)
            rt->workerThreadState->cleanup(rt);
#endif

        /* Unpin all common names before final GC. */
        FinishCommonNames(rt);

        /* Clear debugging state to remove GC roots. */
        for (CompartmentsIter c(rt); !c.done(); c.next())
            c->clearTraps(rt->defaultFreeOp());
        JS_ClearAllWatchPoints(cx);

        /* Clear the statics table to remove GC roots. */
        rt->staticStrings.finish();

        JS::PrepareForFullGC(rt);
        GC(rt, GC_NORMAL, JS::gcreason::LAST_CONTEXT);

        /*
         * Clear the self-hosted global and delete self-hosted classes *after*
         * GC, as finalizers for objects check for clasp->finalize during GC.
         */
        rt->finishSelfHosting();
    } else if (mode == DCM_FORCE_GC) {
        JS_ASSERT(!rt->isHeapBusy());
        JS::PrepareForFullGC(rt);
        GC(rt, GC_NORMAL, JS::gcreason::DESTROY_CONTEXT);
    }
    js_delete_poison(cx);
}
bool
ForkJoinShared::check(ForkJoinSlice &slice)
{
    JS_ASSERT(cx_->runtime->interrupt);

    if (abort_)
        return false;

    if (slice.isMainThread()) {
        // We are the main thread: therefore we must
        // (1) initiate the rendezvous;
        // (2) if GC was requested, reinvoke trigger
        //     which will do various non-thread-safe
        //     preparatory steps. We then invoke
        //     a non-incremental GC manually.
        // (3) run the operation callback, which
        //     would normally run the GC but
        //     incrementally, which we do not want.
        JSRuntime *rt = cx_->runtime;

        // Calls to js::TriggerGC() should have been redirected to
        // requestGC(), and thus the gcIsNeeded flag is not set yet.
        JS_ASSERT(!rt->gcIsNeeded);

        if (gcRequested_ && rt->isHeapBusy()) {
            // Cannot call GCSlice when heap busy, so abort. Easier
            // right now to abort rather than prove it cannot arise,
            // and safer for short-term than asserting !isHeapBusy.
            setAbortFlag(false);
            records_->setCause(ParallelBailoutHeapBusy, NULL, NULL);
            return false;
        }

        // (1). Initialize the rendezvous and record stack extents.
        AutoRendezvous autoRendezvous(slice);
        AutoMarkWorldStoppedForGC autoMarkSTWFlag(slice);
        slice.recordStackExtent();
        AutoInstallForkJoinStackExtents extents(rt, &stackExtents_[0]);

        // (2). Note that because we are in a STW section, calls to
        // js::TriggerGC() etc will not re-invoke
        // ForkJoinSlice::requestGC().
        triggerGCIfRequested();

        // (2b) Run the GC if it is required. This would occur as
        // part of js_InvokeOperationCallback(), but we want to avoid
        // an incremental GC.
        if (rt->gcIsNeeded) {
            GC(rt, GC_NORMAL, gcReason_);
        }

        // (3). Invoke the callback and abort if it returns false.
        if (!js_InvokeOperationCallback(cx_)) {
            records_->setCause(ParallelBailoutInterrupt, NULL, NULL);
            setAbortFlag(true);
            return false;
        }

        return true;
    } else if (rendezvous_) {
        slice.recordStackExtent();
        joinRendezvous(slice);
    }

    return true;
}