bool
GCMarker::markDelayedChildren(SliceBudget& budget)
{
    GCRuntime& gc = runtime()->gc;
    gcstats::AutoPhase ap(gc.stats, gc.state() == MARK, gcstats::PHASE_MARK_DELAYED);

    MOZ_ASSERT(unmarkedArenaStackTop);
    do {
        /*
         * If marking gets delayed at the same arena again, we must repeat
         * marking of its things. For that we pop arena from the stack and
         * clear its hasDelayedMarking flag before we begin the marking.
         */
        ArenaHeader* aheader = unmarkedArenaStackTop;
        MOZ_ASSERT(aheader->hasDelayedMarking);
        MOZ_ASSERT(markLaterArenas);
        unmarkedArenaStackTop = aheader->getNextDelayedMarking();
        aheader->unsetDelayedMarking();
        markLaterArenas--;
        markDelayedChildren(aheader);

        budget.step(150);
        if (budget.isOverBudget())
            return false;
    } while (unmarkedArenaStackTop);
    MOZ_ASSERT(!markLaterArenas);

    return true;
}
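/*
 * Illustrative sketch (not SpiderMonkey code): the slice-budget pattern that
 * markDelayedChildren above follows. Work items are popped from a pending
 * stack, each item charges the budget a fixed cost, and the loop bails out as
 * soon as the budget is exhausted so the caller can resume in a later slice.
 * The names WorkBudget, WorkItem, and drainPending are hypothetical.
 */
#include <cstdint>
#include <vector>

struct WorkBudget {
    int64_t remaining;                       // work units left in this slice
    explicit WorkBudget(int64_t n) : remaining(n) {}
    void step(int64_t cost = 1) { remaining -= cost; }
    bool isOverBudget() const { return remaining <= 0; }
};

struct WorkItem { /* payload omitted */ };

static void processItem(const WorkItem&) { /* do one unit of work */ }

/* Returns true if the pending stack was fully drained, false if interrupted. */
static bool drainPending(std::vector<WorkItem>& pending, WorkBudget& budget) {
    while (!pending.empty()) {
        WorkItem item = pending.back();
        pending.pop_back();                  // pop before processing, as with the arena stack
        processItem(item);

        budget.step(150);                    // charge a fixed cost per item
        if (budget.isOverBudget())
            return false;                    // remaining items stay queued for the next slice
    }
    return true;
}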
void
js::IterateZonesCompartmentsArenasCells(JSRuntime *rt, void *data,
                                        IterateZoneCallback zoneCallback,
                                        JSIterateCompartmentCallback compartmentCallback,
                                        IterateArenaCallback arenaCallback,
                                        IterateCellCallback cellCallback)
{
    AutoPrepareForTracing prop(rt);

    for (ZonesIter zone(rt); !zone.done(); zone.next()) {
        (*zoneCallback)(rt, data, zone);

        for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next())
            (*compartmentCallback)(rt, data, comp);

        for (size_t thingKind = 0; thingKind != FINALIZE_LIMIT; thingKind++) {
            JSGCTraceKind traceKind = MapAllocToTraceKind(AllocKind(thingKind));
            size_t thingSize = Arena::thingSize(AllocKind(thingKind));

            for (ArenaIter aiter(zone, AllocKind(thingKind)); !aiter.done(); aiter.next()) {
                ArenaHeader *aheader = aiter.get();
                (*arenaCallback)(rt, data, aheader->getArena(), traceKind, thingSize);
                for (CellIterUnderGC iter(aheader); !iter.done(); iter.next())
                    (*cellCallback)(rt, data, iter.getCell(), traceKind, thingSize);
            }
        }
    }
}
void
GCMarker::processMarkStackOther(SliceBudget &budget, uintptr_t tag, uintptr_t addr)
{
    if (tag == TypeTag) {
        ScanTypeObject(this, reinterpret_cast<types::TypeObject *>(addr));
    } else if (tag == SavedValueArrayTag) {
        JS_ASSERT(!(addr & Cell::CellMask));
        JSObject *obj = reinterpret_cast<JSObject *>(addr);
        HeapValue *vp, *end;
        if (restoreValueArray(obj, (void **)&vp, (void **)&end))
            pushValueArray(obj, vp, end);
        else
            pushObject(obj);
    } else if (tag == IonCodeTag) {
        MarkChildren(this, reinterpret_cast<ion::IonCode *>(addr));
    } else if (tag == ArenaTag) {
        ArenaHeader *aheader = reinterpret_cast<ArenaHeader *>(addr);
        AllocKind thingKind = aheader->getAllocKind();
        size_t thingSize = Arena::thingSize(thingKind);

        for ( ; aheader; aheader = aheader->next) {
            Arena *arena = aheader->getArena();
            FreeSpan firstSpan(aheader->getFirstFreeSpan());
            const FreeSpan *span = &firstSpan;

            for (uintptr_t thing = arena->thingsStart(thingKind); ; thing += thingSize) {
                JS_ASSERT(thing <= arena->thingsEnd());
                if (thing == span->first) {
                    if (!span->hasNext())
                        break;
                    thing = span->last;
                    span = span->nextSpan();
                } else {
                    JSObject *object = reinterpret_cast<JSObject *>(thing);
                    if (object->hasSingletonType() && object->markIfUnmarked(getMarkColor()))
                        pushObject(object);
                    budget.step();
                }
            }
            if (budget.isOverBudget()) {
                pushArenaList(aheader);
                return;
            }
        }
    }
#if JS_HAS_XML_SUPPORT
    else {
        JS_ASSERT(tag == XmlTag);
        MarkChildren(this, reinterpret_cast<JSXML *>(addr));
    }
#endif
}
TenuredCell *
ArenaLists::allocateFromArena(JS::Zone *zone, AllocKind thingKind,
                              AutoMaybeStartBackgroundAllocation &maybeStartBGAlloc)
{
    JSRuntime *rt = zone->runtimeFromAnyThread();
    Maybe<AutoLockGC> maybeLock;

    // See if we can proceed without taking the GC lock.
    if (backgroundFinalizeState[thingKind] != BFS_DONE)
        maybeLock.emplace(rt);

    ArenaList &al = arenaLists[thingKind];
    ArenaHeader *aheader = al.takeNextArena();
    if (aheader) {
        // Empty arenas should be immediately freed.
        MOZ_ASSERT(!aheader->isEmpty());
        return allocateFromArenaInner<HasFreeThings>(zone, aheader, thingKind);
    }

    // Parallel threads have their own ArenaLists, but chunks are shared;
    // if we haven't already, take the GC lock now to avoid racing.
    if (maybeLock.isNothing())
        maybeLock.emplace(rt);

    Chunk *chunk = rt->gc.pickChunk(maybeLock.ref(), maybeStartBGAlloc);
    if (!chunk)
        return nullptr;

    // Although our chunk should definitely have enough space for another arena,
    // there are other valid reasons why Chunk::allocateArena() may fail.
    aheader = rt->gc.allocateArena(chunk, zone, thingKind, maybeLock.ref());
    if (!aheader)
        return nullptr;

    MOZ_ASSERT(!maybeLock->wasUnlocked());
    MOZ_ASSERT(al.isCursorAtEnd());
    al.insertAtCursor(aheader);

    return allocateFromArenaInner<IsEmpty>(zone, aheader, thingKind);
}
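/*
 * Illustrative sketch (standard-library stand-ins, not the SpiderMonkey
 * types): the lazy-locking pattern allocateFromArena implements with
 * Maybe<AutoLockGC>. The lock is taken up front only when a condition makes
 * the fast path unsafe; otherwise the fast path runs unlocked and the lock is
 * acquired later, only if the slow path that touches shared state is reached.
 * allocateFast, allocateSlow, and needsLockEarly are hypothetical.
 */
#include <mutex>
#include <optional>

static std::mutex gcLock;

static bool needsLockEarly() { return false; }   // e.g. background finalization in progress
static void *allocateFast() { return nullptr; }  // lock-free fast path; may fail
static void *allocateSlow() { return nullptr; }  // shared-state slow path; needs the lock

static void *allocate() {
    std::optional<std::unique_lock<std::mutex>> maybeLock;

    // Take the lock up front only when the fast path would race.
    if (needsLockEarly())
        maybeLock.emplace(gcLock);

    if (void *p = allocateFast())
        return p;

    // Slow path touches shared state: make sure we hold the lock now.
    if (!maybeLock)
        maybeLock.emplace(gcLock);

    return allocateSlow();
}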
void
GCMarker::reset()
{
    color = BLACK;

    stack.reset();
    MOZ_ASSERT(isMarkStackEmpty());

    while (unmarkedArenaStackTop) {
        ArenaHeader* aheader = unmarkedArenaStackTop;
        MOZ_ASSERT(aheader->hasDelayedMarking);
        MOZ_ASSERT(markLaterArenas);
        unmarkedArenaStackTop = aheader->getNextDelayedMarking();
        aheader->unsetDelayedMarking();
        aheader->markOverflow = 0;
        aheader->allocatedDuringIncremental = 0;
        markLaterArenas--;
    }
    MOZ_ASSERT(isDrained());
    MOZ_ASSERT(!markLaterArenas);
}
/*
 * Tests whether w is a (possibly dead) GC thing. Returns CGCT_VALID and
 * details about the thing if so. On failure, returns the reason for rejection.
 */
static inline ConservativeGCTest
IsAddressableGCThing(JSRuntime *rt, uintptr_t w,
                     bool skipUncollectedCompartments,
                     gc::AllocKind *thingKindPtr,
                     ArenaHeader **arenaHeader,
                     void **thing)
{
    /*
     * We assume that the compiler never uses sub-word alignment to store
     * pointers and does not tag pointers on its own. Additionally, the value
     * representation for all values and the jsid representation for GC-things
     * do not touch the low two bits. Thus any word with the low two bits set
     * is not a valid GC-thing.
     */
    JS_STATIC_ASSERT(JSID_TYPE_STRING == 0 && JSID_TYPE_OBJECT == 4);
    if (w & 0x3)
        return CGCT_LOWBITSET;

    /*
     * An object jsid has its low bits tagged. In the value representation on
     * 64-bit, the high bits are tagged.
     */
    const uintptr_t JSID_PAYLOAD_MASK = ~uintptr_t(JSID_TYPE_MASK);
#if JS_BITS_PER_WORD == 32
    uintptr_t addr = w & JSID_PAYLOAD_MASK;
#elif JS_BITS_PER_WORD == 64
    uintptr_t addr = w & JSID_PAYLOAD_MASK & JSVAL_PAYLOAD_MASK;
#endif

    Chunk *chunk = Chunk::fromAddress(addr);

    if (!rt->gcChunkSet.has(chunk))
        return CGCT_NOTCHUNK;

    /*
     * We query for pointers outside the arena array after checking for an
     * allocated chunk. Such pointers are rare and we want to reject them
     * after doing more likely rejections.
     */
    if (!Chunk::withinArenasRange(addr))
        return CGCT_NOTARENA;

    /* If the arena is not currently allocated, don't access the header. */
    size_t arenaOffset = Chunk::arenaIndex(addr);
    if (chunk->decommittedArenas.get(arenaOffset))
        return CGCT_FREEARENA;

    ArenaHeader *aheader = &chunk->arenas[arenaOffset].aheader;

    if (!aheader->allocated())
        return CGCT_FREEARENA;

    if (skipUncollectedCompartments && !aheader->zone->isCollecting())
        return CGCT_OTHERCOMPARTMENT;

    AllocKind thingKind = aheader->getAllocKind();
    uintptr_t offset = addr & ArenaMask;
    uintptr_t minOffset = Arena::firstThingOffset(thingKind);
    if (offset < minOffset)
        return CGCT_NOTARENA;

    /* addr can point inside the thing so we must align the address. */
    uintptr_t shift = (offset - minOffset) % Arena::thingSize(thingKind);
    addr -= shift;

    if (thing)
        *thing = reinterpret_cast<void *>(addr);
    if (arenaHeader)
        *arenaHeader = aheader;
    if (thingKindPtr)
        *thingKindPtr = thingKind;
    return CGCT_VALID;
}
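/*
 * Illustrative sketch (hypothetical constants, not the real SpiderMonkey
 * layout): two of the filters IsAddressableGCThing applies above. A word with
 * either of the low two bits set cannot be a GC-thing pointer, and an
 * interior pointer is aligned down to the start of its cell using the arena's
 * first-thing offset and the cell size. kArenaMask is an assumed value.
 */
#include <cstddef>
#include <cstdint>

static const uintptr_t kArenaMask = 4096 - 1;    // assumed 4 KiB arenas

static bool mightBeGCThing(uintptr_t w) {
    // GC-thing pointers are at least 4-byte aligned, so reject any word
    // with either of the low two bits set.
    return (w & 0x3) == 0;
}

/* Align an interior pointer down to the cell that contains it. */
static uintptr_t alignToCell(uintptr_t addr, size_t firstThingOffset, size_t thingSize) {
    uintptr_t offset = addr & kArenaMask;                  // offset within the arena
    uintptr_t shift = (offset - firstThingOffset) % thingSize;
    return addr - shift;                                   // start of the containing cell
}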