/* TractOfBaseAddr -- look up the tract whose base address is addr
 *
 * addr must be aligned to the arena grain size.  Consults the
 * arena's one-entry tract cache first (see
 * <design/arena/#tract.cache>); on a miss it falls back to a full
 * lookup via TractOfAddr.  This is on the critical path, hence the
 * _CRITICAL assertion variants throughout.
 */
Tract TractOfBaseAddr(Arena arena, Addr addr)
{
  Tract result = NULL;

  AVERT_CRITICAL(Arena, arena);
  AVER_CRITICAL(AddrIsAligned(addr, ArenaGrainSize(arena)));

  /* Fast path: the cached tract, see <design/arena/#tract.cache>. */
  if (addr == arena->lastTractBase) {
    result = arena->lastTract;
  } else {
    /* Slow path: full lookup.  The address of a tract base must
       always be found. */
    Bool located = TractOfAddr(&result, arena, addr);
    AVER_CRITICAL(located);
  }

  AVER_CRITICAL(TractBase(result) == addr);
  return result;
}
/* ChunkInit -- initialize a chunk descriptor
 *
 * chunk    -- the (uninitialized) chunk structure to fill in.
 * arena    -- the owning arena.
 * base     -- base address of the chunk's address range (grain aligned).
 * limit    -- limit address of the chunk's address range (grain aligned).
 * reserved -- amount of address space reserved for this chunk.
 * boot     -- boot allocator used to carve the chunk's own tables
 *             (alloc table, page table) out of the chunk itself.
 *
 * Returns ResOK on success; on failure returns the failing sub-call's
 * result code and leaves the chunk safe to discard (see .no-clean).
 * NOTE(review): ordering below is significant — the alloc table is
 * reset only *after* the class chunkInit, because the class method may
 * be responsible for mapping the memory the table lives in.
 */
Res ChunkInit(Chunk chunk, Arena arena, Addr base, Addr limit,
              Size reserved, BootBlock boot)
{
  Size size;
  Count pages;
  Shift pageShift;
  Size pageTableSize;
  Addr allocBase;
  void *p;
  Res res;

  /* chunk is supposed to be uninitialized, so don't check it. */
  AVERT(Arena, arena);
  AVER(base != NULL);
  AVER(AddrIsAligned(base, ArenaGrainSize(arena)));
  AVER(base < limit);
  AVER(AddrIsAligned(limit, ArenaGrainSize(arena)));
  AVERT(BootBlock, boot);

  chunk->serial = (arena->chunkSerial)++;
  chunk->arena = arena;
  RingInit(&chunk->arenaRing);

  /* One page per arena grain: pageSize is the grain size and
     pageShift its log2, used for address<->page-index conversion. */
  chunk->pageSize = ArenaGrainSize(arena);
  chunk->pageShift = pageShift = SizeLog2(chunk->pageSize);
  chunk->base = base;
  chunk->limit = limit;
  chunk->reserved = reserved;
  size = ChunkSize(chunk);

  /* .overhead.pages: Chunk overhead for the page allocation table. */
  chunk->pages = pages = size >> pageShift;
  res = BootAlloc(&p, boot, (size_t)BTSize(pages), MPS_PF_ALIGN);
  if (res != ResOK)
    goto failAllocTable;
  chunk->allocTable = p;

  /* Page table size is rounded up to whole pages so that it can be
     mapped page-at-a-time. */
  pageTableSize = SizeAlignUp(pages * sizeof(PageUnion), chunk->pageSize);
  chunk->pageTablePages = pageTableSize >> pageShift;

  /* Class-specific initialization (allocates the page table via boot). */
  res = Method(Arena, arena, chunkInit)(chunk, boot);
  if (res != ResOK)
    goto failClassInit;

  /* @@@@ Is BootAllocated always right? */
  /* Last thing we BootAlloc'd is pageTable.  We requested pageSize */
  /* alignment, and pageTableSize is itself pageSize aligned, so */
  /* BootAllocated should also be pageSize aligned. */
  AVER(AddrIsAligned(BootAllocated(boot), chunk->pageSize));
  /* allocBase is the index of the first page available for client
     allocation, i.e. the first page past the chunk's own tables. */
  chunk->allocBase = (Index)(BootAllocated(boot) >> pageShift);

  /* Init allocTable after class init, because it might be mapped there. */
  BTResRange(chunk->allocTable, 0, pages);

  /* Check that there is some usable address space remaining in the chunk. */
  allocBase = PageIndexBase(chunk, chunk->allocBase);
  AVER(allocBase < chunk->limit);

  /* Add the chunk's free address space to the arena's freeLand, so
     that we can allocate from it. */
  if (arena->hasFreeLand) {
    res = ArenaFreeLandInsert(arena, allocBase, chunk->limit);
    if (res != ResOK)
      goto failLandInsert;
  }

  TreeInit(&chunk->chunkTree);
  chunk->sig = ChunkSig;
  AVERT(Chunk, chunk);

  ArenaChunkInsert(arena, chunk);

  return ResOK;

failLandInsert:
  /* Undo the class init; nothing else needs unwinding (see below). */
  Method(Arena, arena, chunkFinish)(chunk);
  /* .no-clean: No clean-ups needed past this point for boot, as we
     will discard the chunk. */
failClassInit:
failAllocTable:
  return res;
}
/* MVTInit -- initialize an MVT (manual variable temporal) pool
 *
 * pool  -- the pool structure to initialize (already allocated).
 * arena -- the owning arena.
 * klass -- pool class; passed through to NextMethod, otherwise only
 *          used for debug pools.
 * args  -- keyword arguments overriding the MVT_*_DEFAULT tuning
 *          parameters (alignment, min/mean/max block size, reserve
 *          depth, fragmentation limit).
 *
 * Returns ResOK on success.  On failure, unwinds everything
 * initialized so far (see the goto chain at the end) and returns the
 * failing sub-call's result code.
 */
static Res MVTInit(Pool pool, Arena arena, PoolClass klass, ArgList args)
{
  Size align = MVT_ALIGN_DEFAULT;
  Size minSize = MVT_MIN_SIZE_DEFAULT;
  Size meanSize = MVT_MEAN_SIZE_DEFAULT;
  Size maxSize = MVT_MAX_SIZE_DEFAULT;
  Count reserveDepth = MVT_RESERVE_DEPTH_DEFAULT;
  Count fragLimit = MVT_FRAG_LIMIT_DEFAULT;
  Size reuseSize, fillSize;
  Count abqDepth;
  MVT mvt;
  Res res;
  ArgStruct arg;

  AVER(pool != NULL);
  AVERT(Arena, arena);
  AVERT(ArgList, args);
  UNUSED(klass); /* used for debug pools only */

  /* Pick up any caller-supplied overrides of the default parameters. */
  if (ArgPick(&arg, args, MPS_KEY_ALIGN))
    align = arg.val.align;
  if (ArgPick(&arg, args, MPS_KEY_MIN_SIZE))
    minSize = arg.val.size;
  if (ArgPick(&arg, args, MPS_KEY_MEAN_SIZE))
    meanSize = arg.val.size;
  if (ArgPick(&arg, args, MPS_KEY_MAX_SIZE))
    maxSize = arg.val.size;
  if (ArgPick(&arg, args, MPS_KEY_MVT_RESERVE_DEPTH))
    reserveDepth = arg.val.count;
  if (ArgPick(&arg, args, MPS_KEY_MVT_FRAG_LIMIT)) {
    /* pending complete fix for job003319 */
    /* The argument is a fraction in [0, 1]; stored as a percentage. */
    AVER(0 <= arg.val.d);
    AVER(arg.val.d <= 1);
    fragLimit = (Count)(arg.val.d * 100);
  }

  AVERT(Align, align);
  /* This restriction on the alignment is necessary because of the
     use of a Freelist to store the free address ranges in low-memory
     situations.  <design/freelist#.impl.grain.align>. */
  AVER(AlignIsAligned(align, FreelistMinimumAlignment));
  AVER(align <= ArenaGrainSize(arena));
  AVER(0 < minSize);
  AVER(minSize <= meanSize);
  AVER(meanSize <= maxSize);
  AVER(reserveDepth > 0);
  AVER(fragLimit <= 100);
  /* TODO: More parameter checks possible? */

  /* Derive internal sizes from the tuning parameters. */
  /* see <design/poolmvt#.arch.parameters> */
  fillSize = SizeArenaGrains(maxSize, arena);
  /* see <design/poolmvt#.arch.fragmentation.internal> */
  reuseSize = 2 * fillSize;
  /* ABQ depth: enough reuse-sized blocks to hold reserveDepth
     mean-sized allocations (rounded up). */
  abqDepth = (reserveDepth * meanSize + reuseSize - 1) / reuseSize;
  /* keep the abq from being useless */
  if (abqDepth < 3)
    abqDepth = 3;

  res = NextMethod(Pool, MVTPool, init)(pool, arena, klass, args);
  if (res != ResOK)
    goto failNextInit;
  mvt = CouldBeA(MVTPool, pool);

  /* Free-range tracking: primary CBS, secondary Freelist (for
     low-memory fallback), combined behind a Failover land. */
  res = LandInit(MVTFreePrimary(mvt), CLASS(CBSFast), arena, align, mvt,
                 mps_args_none);
  if (res != ResOK)
    goto failFreePrimaryInit;

  res = LandInit(MVTFreeSecondary(mvt), CLASS(Freelist), arena, align,
                 mvt, mps_args_none);
  if (res != ResOK)
    goto failFreeSecondaryInit;

  MPS_ARGS_BEGIN(foArgs) {
    MPS_ARGS_ADD(foArgs, FailoverPrimary, MVTFreePrimary(mvt));
    MPS_ARGS_ADD(foArgs, FailoverSecondary, MVTFreeSecondary(mvt));
    res = LandInit(MVTFreeLand(mvt), CLASS(Failover), arena, align, mvt,
                   foArgs);
  } MPS_ARGS_END(foArgs);
  if (res != ResOK)
    goto failFreeLandInit;

  /* The available block queue, holding reuse candidates. */
  res = ABQInit(arena, MVTABQ(mvt), (void *)mvt, abqDepth,
                sizeof(RangeStruct));
  if (res != ResOK)
    goto failABQInit;

  pool->alignment = align;
  pool->alignShift = SizeLog2(pool->alignment);
  mvt->reuseSize = reuseSize;
  mvt->fillSize = fillSize;
  mvt->abqOverflow = FALSE;
  mvt->minSize = minSize;
  mvt->meanSize = meanSize;
  mvt->maxSize = maxSize;
  mvt->fragLimit = fragLimit;
  mvt->splinter = FALSE;
  mvt->splinterBase = (Addr)0;
  mvt->splinterLimit = (Addr)0;

  /* accounting */
  mvt->size = 0;
  mvt->allocated = 0;
  mvt->available = 0;
  mvt->availLimit = 0;
  mvt->unavailable = 0;

  /* meters */
  METER_INIT(mvt->segAllocs, "segment allocations", (void *)mvt);
  METER_INIT(mvt->segFrees, "segment frees", (void *)mvt);
  METER_INIT(mvt->bufferFills, "buffer fills", (void *)mvt);
  METER_INIT(mvt->bufferEmpties, "buffer empties", (void *)mvt);
  METER_INIT(mvt->poolFrees, "pool frees", (void *)mvt);
  METER_INIT(mvt->poolSize, "pool size", (void *)mvt);
  METER_INIT(mvt->poolAllocated, "pool allocated", (void *)mvt);
  METER_INIT(mvt->poolAvailable, "pool available", (void *)mvt);
  METER_INIT(mvt->poolUnavailable, "pool unavailable", (void *)mvt);
  METER_INIT(mvt->poolUtilization, "pool utilization", (void *)mvt);
  METER_INIT(mvt->finds, "ABQ finds", (void *)mvt);
  METER_INIT(mvt->overflows, "ABQ overflows", (void *)mvt);
  METER_INIT(mvt->underflows, "ABQ underflows", (void *)mvt);
  METER_INIT(mvt->refills, "ABQ refills", (void *)mvt);
  METER_INIT(mvt->refillPushes, "ABQ refill pushes", (void *)mvt);
  METER_INIT(mvt->returns, "ABQ returns", (void *)mvt);
  METER_INIT(mvt->perfectFits, "perfect fits", (void *)mvt);
  METER_INIT(mvt->firstFits, "first fits", (void *)mvt);
  METER_INIT(mvt->secondFits, "second fits", (void *)mvt);
  METER_INIT(mvt->failures, "failures", (void *)mvt);
  METER_INIT(mvt->emergencyContingencies, "emergency contingencies",
             (void *)mvt);
  METER_INIT(mvt->fragLimitContingencies,
             "fragmentation limit contingencies", (void *)mvt);
  METER_INIT(mvt->contingencySearches, "contingency searches", (void *)mvt);
  METER_INIT(mvt->contingencyHardSearches, "contingency hard searches",
             (void *)mvt);
  METER_INIT(mvt->splinters, "splinters", (void *)mvt);
  METER_INIT(mvt->splintersUsed, "splinters used", (void *)mvt);
  METER_INIT(mvt->splintersDropped, "splinters dropped", (void *)mvt);
  METER_INIT(mvt->sawdust, "sawdust", (void *)mvt);
  METER_INIT(mvt->exceptions, "exceptions", (void *)mvt);
  METER_INIT(mvt->exceptionSplinters, "exception splinters", (void *)mvt);
  METER_INIT(mvt->exceptionReturns, "exception returns", (void *)mvt);

  SetClassOfPoly(pool, CLASS(MVTPool));
  mvt->sig = MVTSig;
  AVERC(MVT, mvt);

  EVENT6(PoolInitMVT, pool, minSize, meanSize, maxSize, reserveDepth,
         fragLimit);
  return ResOK;

  /* Unwind in reverse order of initialization. */
failABQInit:
  LandFinish(MVTFreeLand(mvt));
failFreeLandInit:
  LandFinish(MVTFreeSecondary(mvt));
failFreeSecondaryInit:
  LandFinish(MVTFreePrimary(mvt));
failFreePrimaryInit:
  NextMethod(Inst, MVTPool, finish)(MustBeA(Inst, pool));
failNextInit:
  AVER(res != ResOK);
  return res;
}
/* TractLimit -- return the limit address of a tract
 *
 * A tract spans exactly one arena grain, so its limit is its base
 * address plus the arena grain size.
 */
Addr TractLimit(Tract tract, Arena arena)
{
  Addr tractBase;

  AVERT_CRITICAL(Tract, tract); /* .tract.critical */
  AVERT_CRITICAL(Arena, arena);

  tractBase = TractBase(tract);
  return AddrAdd(tractBase, ArenaGrainSize(arena));
}
/* VMChunkCreate -- create a chunk
 *
 * chunkReturn, return parameter for the created chunk.
 * vmArena, the parent VMArena.
 * size, approximate amount of virtual address that the chunk should reserve.
 *
 * Reserves a VM region, boot-allocates the VMChunk descriptor at the
 * start of that region, maps just the grain(s) containing the
 * descriptor, then hands the rest over to ChunkInit.  On failure,
 * unwinds via the goto chain and returns the failing result code.
 */
static Res VMChunkCreate(Chunk *chunkReturn, VMArena vmArena, Size size)
{
  Arena arena;
  Res res;
  Addr base, limit, chunkStructLimit;
  /* The VM descriptor lives on the stack until it can be copied into
     the mapped chunk (see VMCopy below). */
  VMStruct vmStruct;
  VM vm = &vmStruct;
  BootBlockStruct bootStruct;
  BootBlock boot = &bootStruct;
  VMChunk vmChunk;
  void *p;

  AVER(chunkReturn != NULL);
  AVERT(VMArena, vmArena);
  arena = VMArena2Arena(vmArena);
  AVER(size > 0);

  /* Reserve (but don't yet map) the chunk's address space. */
  res = VMInit(vm, size, ArenaGrainSize(arena), vmArena->vmParams);
  if (res != ResOK)
    goto failVMInit;

  base = VMBase(vm);
  limit = VMLimit(vm);

  res = BootBlockInit(boot, (void *)base, (void *)limit);
  if (res != ResOK)
    goto failBootInit;

  /* Allocate and map the descriptor. */
  /* See <design/arena/>.@@@@ */
  res = BootAlloc(&p, boot, sizeof(VMChunkStruct), MPS_PF_ALIGN);
  if (res != ResOK)
    goto failChunkAlloc;
  vmChunk = p;
  /* Calculate the limit of the grain where the chunkStruct resides. */
  chunkStructLimit = AddrAlignUp((Addr)(vmChunk + 1), ArenaGrainSize(arena));
  res = vmArenaMap(vmArena, vm, base, chunkStructLimit);
  if (res != ResOK)
    goto failChunkMap;
  vmChunk->overheadMappedLimit = chunkStructLimit;

  /* Copy VM descriptor into its place in the chunk. */
  VMCopy(VMChunkVM(vmChunk), vm);
  res = ChunkInit(VMChunk2Chunk(vmChunk), arena, base, limit,
                  VMReserved(VMChunkVM(vmChunk)), boot);
  if (res != ResOK)
    goto failChunkInit;

  BootBlockFinish(boot);

  vmChunk->sig = VMChunkSig;
  AVERT(VMChunk, vmChunk);

  *chunkReturn = VMChunk2Chunk(vmChunk);
  return ResOK;

failChunkInit:
  /* Unmap via the stack-local vm, which is still valid here. */
  VMUnmap(vm, VMBase(vm), chunkStructLimit);
failChunkMap:
failChunkAlloc:
failBootInit:
  VMFinish(vm);
failVMInit:
  return res;
}