static Res VMChunkInit(Chunk chunk, BootBlock boot)
{
  VMChunk vmChunk;
  Addr overheadLimit;
  void *p;
  Res res;
  BT saMapped, saPages;

  /* chunk is supposed to be uninitialized, so don't check it. */
  vmChunk = Chunk2VMChunk(chunk);
  AVERT(BootBlock, boot);

  res = BootAlloc(&p, boot, BTSize(chunk->pages), MPS_PF_ALIGN);
  if (res != ResOK)
    goto failSaMapped;
  saMapped = p;

  res = BootAlloc(&p, boot, BTSize(chunk->pageTablePages), MPS_PF_ALIGN);
  if (res != ResOK)
    goto failSaPages;
  saPages = p;

  overheadLimit = AddrAdd(chunk->base, (Size)BootAllocated(boot));

  /* Put the page table as late as possible, as in VM systems we don't */
  /* want to map it. */
  res = BootAlloc(&p, boot, chunk->pageTablePages << chunk->pageShift,
                  chunk->pageSize);
  if (res != ResOK)
    goto failAllocPageTable;
  chunk->pageTable = p;

  /* Map memory for the bit tables. */
  if (vmChunk->overheadMappedLimit < overheadLimit) {
    overheadLimit = AddrAlignUp(overheadLimit, ChunkPageSize(chunk));
    res = vmArenaMap(VMChunkVMArena(vmChunk), vmChunk->vm,
                     vmChunk->overheadMappedLimit, overheadLimit);
    if (res != ResOK)
      goto failTableMap;
    vmChunk->overheadMappedLimit = overheadLimit;
  }

  SparseArrayInit(&vmChunk->pages, chunk->pageTable, sizeof(PageUnion),
                  chunk->pages, saMapped, saPages, vmChunk->vm);

  return ResOK;

  /* .no-clean: No clean-ups needed for boot, as we will discard the */
  /* chunk. */
failTableMap:
failAllocPageTable:
failSaPages:
failSaMapped:
  return res;
}
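BootAlloc hands out pieces of the chunk's own address space during bootstrap, before any general-purpose allocator exists. The following is a minimal sketch of the bump-pointer behaviour it is assumed to have; the Sketch names are hypothetical stand-ins, and the real MPS boot allocator reports failure through a Res code rather than NULL:

#include <stddef.h>
#include <stdint.h>

/* Hypothetical stand-in for the MPS BootBlock: a bump allocator over a
   fixed region at the start of the chunk. */
typedef struct BootBlockSketch {
  char *alloc;  /* next free byte */
  char *limit;  /* end of the boot region */
} BootBlockSketch;

/* Round the cursor up to align (a power of two), hand out size bytes,
   and advance; this mirrors the BootAlloc calls above, where each table
   is placed at the next suitably aligned address. */
static void *BootAllocSketch(BootBlockSketch *boot, size_t size, size_t align)
{
  uintptr_t p = ((uintptr_t)boot->alloc + align - 1) & ~(uintptr_t)(align - 1);
  if (p > (uintptr_t)boot->limit || (uintptr_t)boot->limit - p < size)
    return NULL;  /* out of boot space; the real allocator returns a Res */
  boot->alloc = (char *)(p + size);
  return (void *)p;
}

Note that VMChunkInit records overheadLimit before allocating the page table, so the vmArenaMap call maps the bit tables but deliberately stops short of the page table, which is then populated sparsely via SparseArrayInit rather than being mapped eagerly.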
Size (BTSize)(Count n)
{
  /* check that the expression used in rounding up doesn't overflow */
  AVER(n+MPS_WORD_WIDTH-1 > n);

  return BTSize(n);
}
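The AVER above guards the rounding expression n + MPS_WORD_WIDTH - 1 against unsigned wrap-around. Here is a standalone sketch of the arithmetic the BTSize macro is assumed to perform, with an assumed 64-bit word width (BTSizeSketch is illustrative, not MPS code):

#include <assert.h>
#include <stddef.h>

#define WORD_WIDTH 64  /* assumption: MPS_WORD_WIDTH on a 64-bit platform */

/* Bytes needed for a bit table of n bits: round n up to whole words,
   then convert words to bytes. */
static size_t BTSizeSketch(size_t n)
{
  size_t words = (n + WORD_WIDTH - 1) / WORD_WIDTH;  /* the guarded sum */
  return words * (WORD_WIDTH / 8);
}

int main(void)
{
  assert(BTSizeSketch(1) == 8);    /* one bit still occupies a whole word */
  assert(BTSizeSketch(64) == 8);
  assert(BTSizeSketch(65) == 16);  /* rounds up, never truncates */
  return 0;
}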
void BTDestroy(BT bt, Arena arena, Count length)
{
  AVER(bt != NULL);
  AVERT(Arena, arena);
  AVER(length > 0);

  ControlFree(arena, bt, BTSize(length));
}
Bool ChunkCheck(Chunk chunk)
{
  CHECKS(Chunk, chunk);
  CHECKU(Arena, chunk->arena);
  CHECKL(chunk->serial < chunk->arena->chunkSerial);
  /* Can't use CHECKD_NOSIG because TreeEMPTY is NULL. */
  CHECKL(TreeCheck(&chunk->chunkTree));
  CHECKL(ChunkPagesToSize(chunk, 1) == ChunkPageSize(chunk));
  CHECKL(ShiftCheck(ChunkPageShift(chunk)));

  CHECKL(chunk->base != (Addr)0);
  CHECKL(chunk->base < chunk->limit);
  /* check chunk structure is at its own base: see .chunk.at.base. */
  CHECKL(chunk->base == (Addr)chunk);
  CHECKL((Addr)(chunk+1) <= chunk->limit);
  CHECKL(ChunkSizeToPages(chunk, ChunkSize(chunk)) == chunk->pages);
  /* check that the tables fit in the chunk */
  CHECKL(chunk->allocBase <= chunk->pages);
  CHECKL(chunk->allocBase >= chunk->pageTablePages);

  CHECKD_NOSIG(BT, chunk->allocTable);
  /* check that allocTable is in the chunk overhead */
  CHECKL((Addr)chunk->allocTable >= chunk->base);
  CHECKL(AddrAdd((Addr)chunk->allocTable, BTSize(chunk->pages))
         <= PageIndexBase(chunk, chunk->allocBase));
  /* check they don't overlap (knowing the order) */
  CHECKL(AddrAdd((Addr)chunk->allocTable, BTSize(chunk->pages))
         <= (Addr)chunk->pageTable);

  CHECKL(chunk->pageTable != NULL);
  CHECKL((Addr)chunk->pageTable >= chunk->base);
  CHECKL((Addr)&chunk->pageTable[chunk->pageTablePages]
         <= PageIndexBase(chunk, chunk->allocBase));
  CHECKL(NONNEGATIVE(INDEX_OF_ADDR(chunk, (Addr)chunk->pageTable)));
  /* check there's enough space in the page table */
  CHECKL(INDEX_OF_ADDR(chunk, AddrSub(chunk->limit, 1)) < chunk->pages);
  CHECKL(chunk->pageTablePages < chunk->pages);

  /* Could check the consistency of the tables, but not O(1). */
  return TRUE;
}
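Taken together, the containment and non-overlap checks pin down the chunk's overhead layout. Summarized as a diagram derived from those checks (class-specific tables, such as the sparse-array bit tables allocated in VMChunkInit, sit between allocTable and pageTable):

/*
 *  chunk->base  -> ChunkStruct    (.chunk.at.base: header at its own base)
 *                  allocTable     (BTSize(chunk->pages) bytes)
 *                  class tables   (e.g. saMapped, saPages for VM chunks)
 *                  pageTable      (chunk->pageTablePages pages, placed last)
 *  allocBase    -> first page available for client allocation
 *  chunk->limit -> end of the chunk
 */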
static Bool VMChunkCheck(VMChunk vmchunk)
{
  Chunk chunk;

  CHECKS(VMChunk, vmchunk);
  chunk = VMChunk2Chunk(vmchunk);
  CHECKD(Chunk, chunk);
  CHECKD_NOSIG(VM, vmchunk->vm); /* <design/check/#hidden-type> */
  CHECKL(VMAlign(vmchunk->vm) == ChunkPageSize(chunk));
  CHECKL(vmchunk->overheadMappedLimit <= (Addr)chunk->pageTable);
  CHECKD(SparseArray, &vmchunk->pages);
  /* SparseArrayCheck is agnostic about where the BTs live, so
     VMChunkCheck makes sure they're where they're expected to be
     (in the chunk). */
  CHECKL(chunk->base < (Addr)vmchunk->pages.mapped);
  CHECKL(AddrAdd(vmchunk->pages.mapped, BTSize(chunk->pages))
         <= vmchunk->overheadMappedLimit);
  CHECKL(chunk->base < (Addr)vmchunk->pages.pages);
  CHECKL(AddrAdd(vmchunk->pages.pages, BTSize(chunk->pageTablePages))
         <= vmchunk->overheadMappedLimit);
  /* .improve.check-table: Could check the consistency of the tables. */
  return TRUE;
}
ATTRIBUTE_UNUSED
static Bool VMChunkCheck(VMChunk vmchunk)
{
  Chunk chunk;

  CHECKS(VMChunk, vmchunk);
  chunk = VMChunk2Chunk(vmchunk);
  CHECKD(Chunk, chunk);
  CHECKD(VM, VMChunkVM(vmchunk));
  CHECKL(SizeIsAligned(ChunkPageSize(chunk), VMPageSize(VMChunkVM(vmchunk))));
  CHECKL(vmchunk->overheadMappedLimit <= (Addr)chunk->pageTable);
  CHECKD(SparseArray, &vmchunk->pages);
  /* SparseArrayCheck is agnostic about where the BTs live, so
     VMChunkCheck makes sure they're where they're expected to be
     (in the chunk). */
  CHECKL(chunk->base < (Addr)vmchunk->pages.mapped);
  CHECKL(AddrAdd(vmchunk->pages.mapped, BTSize(chunk->pages))
         <= vmchunk->overheadMappedLimit);
  CHECKL(chunk->base < (Addr)vmchunk->pages.pages);
  CHECKL(AddrAdd(vmchunk->pages.pages, BTSize(chunk->pageTablePages))
         <= vmchunk->overheadMappedLimit);
  /* .improve.check-table: Could check the consistency of the tables. */
  return TRUE;
}
Res BTCreate(BT *btReturn, Arena arena, Count length)
{
  Res res;
  BT bt;
  void *p;

  AVER(btReturn != NULL);
  AVERT(Arena, arena);
  AVER(length > 0);

  res = ControlAlloc(&p, arena, BTSize(length),
                     /* withReservoirPermit */ FALSE);
  if (res != ResOK)
    return res;
  bt = (BT)p;

  *btReturn = bt;
  return ResOK;
}
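A hedged usage sketch of the lifecycle, assuming an initialized Arena is in scope; BTResRange, BTSetRange and BTGet belong to the same bit-table interface, and the sketch clears the table first because nothing in BTCreate above initializes the bits:

/* Usage sketch: allocate a 1024-bit table, use it, free it.
   Error handling abbreviated for brevity. */
static Res BTExample(Arena arena)
{
  BT table;
  Count length = 1024;
  Res res;

  res = BTCreate(&table, arena, length);
  if (res != ResOK)
    return res;

  BTResRange(table, 0, length);  /* BTCreate doesn't clear the bits */
  BTSetRange(table, 0, 10);      /* set bits [0, 10) */
  AVER(BTGet(table, 3));
  AVER(!BTGet(table, 10));

  /* The table doesn't record its own size, so the caller must pass the
     same length again for BTDestroy to free BTSize(length) bytes. */
  BTDestroy(table, arena, length);
  return ResOK;
}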
Res ChunkInit(Chunk chunk, Arena arena, Addr base, Addr limit,
              Size reserved, BootBlock boot)
{
  Size size;
  Count pages;
  Shift pageShift;
  Size pageTableSize;
  Addr allocBase;
  void *p;
  Res res;

  /* chunk is supposed to be uninitialized, so don't check it. */
  AVERT(Arena, arena);
  AVER(base != NULL);
  AVER(AddrIsAligned(base, ArenaGrainSize(arena)));
  AVER(base < limit);
  AVER(AddrIsAligned(limit, ArenaGrainSize(arena)));
  AVERT(BootBlock, boot);

  chunk->serial = (arena->chunkSerial)++;
  chunk->arena = arena;
  RingInit(&chunk->arenaRing);

  chunk->pageSize = ArenaGrainSize(arena);
  chunk->pageShift = pageShift = SizeLog2(chunk->pageSize);
  chunk->base = base;
  chunk->limit = limit;
  chunk->reserved = reserved;
  size = ChunkSize(chunk);

  /* .overhead.pages: Chunk overhead for the page allocation table. */
  chunk->pages = pages = size >> pageShift;
  res = BootAlloc(&p, boot, (size_t)BTSize(pages), MPS_PF_ALIGN);
  if (res != ResOK)
    goto failAllocTable;
  chunk->allocTable = p;

  pageTableSize = SizeAlignUp(pages * sizeof(PageUnion), chunk->pageSize);
  chunk->pageTablePages = pageTableSize >> pageShift;

  res = Method(Arena, arena, chunkInit)(chunk, boot);
  if (res != ResOK)
    goto failClassInit;

  /* @@@@ Is BootAllocated always right? */
  /* Last thing we BootAlloc'd is pageTable.  We requested pageSize */
  /* alignment, and pageTableSize is itself pageSize aligned, so */
  /* BootAllocated should also be pageSize aligned. */
  AVER(AddrIsAligned(BootAllocated(boot), chunk->pageSize));
  chunk->allocBase = (Index)(BootAllocated(boot) >> pageShift);

  /* Init allocTable after class init, because it might be mapped there. */
  BTResRange(chunk->allocTable, 0, pages);

  /* Check that there is some usable address space remaining in the
     chunk. */
  allocBase = PageIndexBase(chunk, chunk->allocBase);
  AVER(allocBase < chunk->limit);

  /* Add the chunk's free address space to the arena's freeLand, so that
     we can allocate from it. */
  if (arena->hasFreeLand) {
    res = ArenaFreeLandInsert(arena, allocBase, chunk->limit);
    if (res != ResOK)
      goto failLandInsert;
  }

  TreeInit(&chunk->chunkTree);
  chunk->sig = ChunkSig;
  AVERT(Chunk, chunk);

  ArenaChunkInsert(arena, chunk);

  return ResOK;

failLandInsert:
  Method(Arena, arena, chunkFinish)(chunk);
  /* .no-clean: No clean-ups needed past this point for boot, as we */
  /* will discard the chunk. */
failClassInit:
failAllocTable:
  return res;
}
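For concreteness, the page-table overhead arithmetic can be worked through with assumed numbers: 4 KiB pages, a 64 MiB chunk, and sizeof(PageUnion) taken as 64 bytes purely for illustration (the real size is platform- and version-dependent):

#include <stdio.h>
#include <stddef.h>

int main(void)
{
  size_t pageSize = 4096, pageShift = 12;  /* assumed grain size */
  size_t size = (size_t)64 << 20;          /* 64 MiB chunk */
  size_t pages = size >> pageShift;        /* 16384 pages */
  size_t pageUnion = 64;                   /* assumed sizeof(PageUnion) */

  /* pageTableSize = SizeAlignUp(pages * sizeof(PageUnion), pageSize) */
  size_t pageTableSize =
    (pages * pageUnion + pageSize - 1) / pageSize * pageSize;
  size_t pageTablePages = pageTableSize >> pageShift;

  printf("pages=%zu pageTableSize=%zu pageTablePages=%zu\n",
         pages, pageTableSize, pageTablePages);
  /* Prints pages=16384 pageTableSize=1048576 pageTablePages=256:
     under these assumptions the page table costs 1/64 of the chunk. */
  return 0;
}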