/* VMChunkInit -- initialize the VM-specific part of a chunk.
 *
 * Boot-allocates the two sparse-array bit tables and the page table
 * inside the chunk, maps enough VM to cover the bit tables, and
 * initializes the chunk's sparse page array.  Returns ResOK on
 * success, or the failing sub-operation's result code otherwise.
 *
 * NOTE: the BootAlloc order below fixes the in-chunk layout (bit
 * tables first, page table last) and must not be rearranged.
 */
static Res VMChunkInit(Chunk chunk, BootBlock boot)
{
  VMChunk vmchunk;
  Addr mapLimit;
  void *cursor;
  Res res;
  BT btMapped, btPages;

  /* chunk is supposed to be uninitialized, so don't check it. */
  vmchunk = Chunk2VMChunk(chunk);
  AVERT(BootBlock, boot);

  /* Carve out the "pages mapped" bit table. */
  res = BootAlloc(&cursor, boot, BTSize(chunk->pages), MPS_PF_ALIGN);
  if (res != ResOK)
    goto failSaMapped;
  btMapped = cursor;

  /* Carve out the "page-table pages in use" bit table. */
  res = BootAlloc(&cursor, boot, BTSize(chunk->pageTablePages), MPS_PF_ALIGN);
  if (res != ResOK)
    goto failSaPages;
  btPages = cursor;

  /* Snapshot the overhead extent before the page table is allocated,
     so the mapping below covers the bit tables but not the table. */
  mapLimit = AddrAdd(chunk->base, (Size)BootAllocated(boot));

  /* Put the page table as late as possible, as in VM systems we don't */
  /* want to map it. */
  res = BootAlloc(&cursor, boot,
                  chunk->pageTablePages << chunk->pageShift,
                  chunk->pageSize);
  if (res != ResOK)
    goto failAllocPageTable;
  chunk->pageTable = cursor;

  /* Map memory for the bit tables. */
  if (vmchunk->overheadMappedLimit < mapLimit) {
    mapLimit = AddrAlignUp(mapLimit, ChunkPageSize(chunk));
    res = vmArenaMap(VMChunkVMArena(vmchunk), vmchunk->vm,
                     vmchunk->overheadMappedLimit, mapLimit);
    if (res != ResOK)
      goto failTableMap;
    vmchunk->overheadMappedLimit = mapLimit;
  }

  SparseArrayInit(&vmchunk->pages, chunk->pageTable, sizeof(PageUnion),
                  chunk->pages, btMapped, btPages, vmchunk->vm);

  return ResOK;

  /* .no-clean: No clean-ups needed for boot, as we will discard the */
  /* chunk. */
failTableMap:
failSaPages:
failAllocPageTable:
failSaMapped:
  return res;
}
/* ChunkCheck -- validate the invariants of a Chunk structure.
 *
 * Runs a series of O(1) consistency checks on the chunk header and
 * the placement of its internal tables; returns TRUE if all checks
 * pass (CHECKL/CHECKD report or abort on failure, per build mode).
 */
Bool ChunkCheck(Chunk chunk)
{
  CHECKS(Chunk, chunk);
  CHECKU(Arena, chunk->arena);
  CHECKL(chunk->serial < chunk->arena->chunkSerial);
  /* Can't use CHECKD_NOSIG because TreeEMPTY is NULL. */
  CHECKL(TreeCheck(&chunk->chunkTree));
  CHECKL(ChunkPagesToSize(chunk, 1) == ChunkPageSize(chunk));
  CHECKL(ShiftCheck(ChunkPageShift(chunk)));

  /* Address-range sanity: the chunk occupies [base, limit). */
  CHECKL(chunk->base != (Addr)0);
  CHECKL(chunk->base < chunk->limit);
  /* check chunk structure is at its own base: see .chunk.at.base. */
  CHECKL(chunk->base == (Addr)chunk);
  CHECKL((Addr)(chunk+1) <= chunk->limit);
  CHECKL(ChunkSizeToPages(chunk, ChunkSize(chunk)) == chunk->pages);

  /* check that the tables fit in the chunk */
  CHECKL(chunk->allocBase <= chunk->pages);
  CHECKL(chunk->allocBase >= chunk->pageTablePages);
  CHECKD_NOSIG(BT, chunk->allocTable);

  /* check that allocTable is in the chunk overhead */
  CHECKL((Addr)chunk->allocTable >= chunk->base);
  CHECKL(AddrAdd((Addr)chunk->allocTable, BTSize(chunk->pages))
         <= PageIndexBase(chunk, chunk->allocBase));
  /* check they don't overlap (knowing the order: allocTable precedes
     pageTable in memory) */
  CHECKL(AddrAdd((Addr)chunk->allocTable, BTSize(chunk->pages))
         <= (Addr)chunk->pageTable);

  /* Page table placement: inside the chunk, below the allocatable area. */
  CHECKL(chunk->pageTable != NULL);
  CHECKL((Addr)chunk->pageTable >= chunk->base);
  CHECKL((Addr)&chunk->pageTable[chunk->pageTablePages]
         <= PageIndexBase(chunk, chunk->allocBase));
  CHECKL(NONNEGATIVE(INDEX_OF_ADDR(chunk, (Addr)chunk->pageTable)));

  /* check there's enough space in the page table */
  CHECKL(INDEX_OF_ADDR(chunk, AddrSub(chunk->limit, 1)) < chunk->pages);
  CHECKL(chunk->pageTablePages < chunk->pages);

  /* Could check the consistency of the tables, but not O(1). */
  return TRUE;
}
/* VMChunkCheck -- validate the VM-specific part of a chunk.
 *
 * Checks the generic chunk, the chunk's VM descriptor, and that the
 * sparse-array bit tables lie inside the chunk's mapped overhead
 * region.  Returns TRUE if all checks pass.
 */
static Bool VMChunkCheck(VMChunk vmchunk)
{
  Chunk chunk;

  CHECKS(VMChunk, vmchunk);
  chunk = VMChunk2Chunk(vmchunk);
  CHECKD(Chunk, chunk);
  CHECKD_NOSIG(VM, vmchunk->vm); /* <design/check/#hidden-type> */
  /* VM mapping granularity must equal the chunk's page size. */
  CHECKL(VMAlign(vmchunk->vm) == ChunkPageSize(chunk));
  /* The mapped overhead region may not extend into the page table,
     which is deliberately left unmapped. */
  CHECKL(vmchunk->overheadMappedLimit <= (Addr)chunk->pageTable);
  CHECKD(SparseArray, &vmchunk->pages);
  /* SparseArrayCheck is agnostic about where the BTs live, so VMChunkCheck
     makes sure they're where they're expected to be (in the chunk). */
  CHECKL(chunk->base < (Addr)vmchunk->pages.mapped);
  CHECKL(AddrAdd(vmchunk->pages.mapped, BTSize(chunk->pages))
         <= vmchunk->overheadMappedLimit);
  CHECKL(chunk->base < (Addr)vmchunk->pages.pages);
  CHECKL(AddrAdd(vmchunk->pages.pages, BTSize(chunk->pageTablePages))
         <= vmchunk->overheadMappedLimit);
  /* .improve.check-table: Could check the consistency of the tables. */
  return TRUE;
}
/* VMChunkCheck -- validate the VM-specific part of a chunk.
 *
 * Variant using the VMChunkVM accessor instead of reading vmchunk->vm
 * directly, and allowing the chunk page size to be any multiple of the
 * VM page size rather than exactly equal to it.
 *
 * NOTE(review): this appears to be a second definition of VMChunkCheck
 * alongside the one above — presumably these come from different
 * revisions of the file; confirm only one is present in the build.
 */
ATTRIBUTE_UNUSED
static Bool VMChunkCheck(VMChunk vmchunk)
{
  Chunk chunk;

  CHECKS(VMChunk, vmchunk);
  chunk = VMChunk2Chunk(vmchunk);
  CHECKD(Chunk, chunk);
  CHECKD(VM, VMChunkVM(vmchunk));
  /* Chunk pages must be a whole number of VM pages. */
  CHECKL(SizeIsAligned(ChunkPageSize(chunk), VMPageSize(VMChunkVM(vmchunk))));
  /* The mapped overhead region may not extend into the page table,
     which is deliberately left unmapped. */
  CHECKL(vmchunk->overheadMappedLimit <= (Addr)chunk->pageTable);
  CHECKD(SparseArray, &vmchunk->pages);
  /* SparseArrayCheck is agnostic about where the BTs live, so VMChunkCheck
     makes sure they're where they're expected to be (in the chunk). */
  CHECKL(chunk->base < (Addr)vmchunk->pages.mapped);
  CHECKL(AddrAdd(vmchunk->pages.mapped, BTSize(chunk->pages))
         <= vmchunk->overheadMappedLimit);
  CHECKL(chunk->base < (Addr)vmchunk->pages.pages);
  CHECKL(AddrAdd(vmchunk->pages.pages, BTSize(chunk->pageTablePages))
         <= vmchunk->overheadMappedLimit);
  /* .improve.check-table: Could check the consistency of the tables. */
  return TRUE;
}